| prompt (string, length 19-879k) | completion (string, length 3-53.8k) | api (string, length 8-59) |
|---|---|---|
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import numpy as np
from model_compression_toolkit.common.collectors.statistics_collector import StatsCollector
from model_compression_toolkit.common.collectors.statistics_collector import scale_statistics
from model_compression_toolkit.common.collectors.statistics_collector import shift_statistics
from model_compression_toolkit.common.framework_info import ChannelAxis
def init_stats_container(num_of_input_channels, init_min=None, init_max=None):
sc = StatsCollector(init_min_value=init_min, init_max_value=init_max, output_channel_index=ChannelAxis.NHWC)
x = np.random.rand(1, 2, 3, num_of_input_channels)
for i in range(100):
sc.update_statistics(x)
return sc
def scale_stats_container(sc, num_of_scaling_factors):
scaling_factor = np.random.random(num_of_scaling_factors)
scaled_sc = scale_statistics(sc, scaling_factor)
return scaled_sc, scaling_factor
def shift_stats_container(sc, num_of_shifting_factors):
shifting_factor = np.random.random(num_of_shifting_factors)
shifted_sc = shift_statistics(sc, shifting_factor)
return shifted_sc, shifting_factor
class TestCollectorsManipulations(unittest.TestCase):
########### Test scaling ###########
def test_mean_scale_per_channel(self, num_of_scaling_factors=10):
sc = init_stats_container(num_of_scaling_factors)
mean = sc.get_mean()
scaled_sc, scaling_factor = scale_stats_container(sc, num_of_scaling_factors)
scaled_mean = scaled_sc.get_mean()
self.assertTrue(np.allclose(scaled_mean / scaling_factor, mean))
def test_mean_scale_per_tensor(self, num_of_scaling_factors=1):
sc = init_stats_container(num_of_scaling_factors)
mean = sc.get_mean()
scaled_sc, scaling_factor = scale_stats_container(sc, num_of_scaling_factors)
scaled_mean = scaled_sc.get_mean()
self.assertTrue(np.allclose(scaled_mean / scaling_factor, mean))
def test_histogram_scale_per_channel(self, num_of_scaling_factors=10):
sc = init_stats_container(num_of_scaling_factors)
bins, _ = sc.hc.get_histogram()
scaled_sc, scaling_factor = scale_stats_container(sc, num_of_scaling_factors)
with self.assertRaises(Exception):
scaled_sc.hc.get_histogram() # data is corrupted. expect exception
def test_histogram_scale_per_tensor(self, num_of_scaling_factors=1):
sc = init_stats_container(num_of_scaling_factors)
bins, _ = sc.hc.get_histogram()
scaled_sc, scaling_factor = scale_stats_container(sc, num_of_scaling_factors)
scaled_bins, _ = scaled_sc.hc.get_histogram()
self.assertTrue(np.allclose(scaled_bins / scaling_factor, bins))
def test_min_max_scale_per_channel(self, num_of_scaling_factors=10):
sc = init_stats_container(num_of_scaling_factors)
min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel
scaled_sc, scaling_factor = scale_stats_container(sc, num_of_scaling_factors)
min_pc_scaled, max_pc_scaled = scaled_sc.mpcc.min_per_channel, scaled_sc.mpcc.max_per_channel
self.assertTrue(np.allclose(min_pc_scaled / scaling_factor, min_pc))
        self.assertTrue(np.allclose(max_pc_scaled / scaling_factor, max_pc))
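# Hedged sketch (not part of the original test file): the scaling tests above
# check a simple invariant -- scaling the collected data per channel scales the
# mean by the same per-channel factor. The check below reproduces it with plain
# numpy, with no dependency on model_compression_toolkit; names are illustrative.
def _check_scale_invariant(num_channels=10):
    x = np.random.rand(100, num_channels)
    factor = np.random.random(num_channels)
    assert np.allclose((x * factor).mean(axis=0) / factor, x.mean(axis=0))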
import sys
import random
import time
import warnings
from collections import defaultdict
import numpy as np
from scipy import stats
from scipy import integrate
from scipy.stats import ks_2samp, kstest, rankdata, wilcoxon
from sklearn.metrics.pairwise import rbf_kernel
try:  # optional dependency, only used by np2r below
    import rpy2.robjects as R
except ImportError:
    R = None
def same(x):
return x
def cube(x):
return np.power(x, 3)
def negexp(x):
return np.exp(-np.abs(x))
def generate_samples_random(size=1000, sType='CI', dx=1, dy=1, dz=20, nstd=1, fixed_function='linear',
debug=False, normalize = True, seed = None, dist_z = 'gaussian'):
    '''Generate CI, I or NI post-nonlinear samples
1. Z is independent Gaussian or Laplace
2. X = f1(<a,Z> + b + noise) and Y = f2(<c,Z> + d + noise) in case of CI
Arguments:
size : number of samples
sType: CI, I, or NI
dx: Dimension of X
dy: Dimension of Y
dz: Dimension of Z
nstd: noise standard deviation
f1, f2 to be within {x,x^2,x^3,tanh x, e^{-|x|}, cos x}
Output:
Samples X, Y, Z
'''
    if seed is None:
np.random.seed()
else:
np.random.seed(seed)
if fixed_function == 'linear':
f1 = same
f2 = same
else:
I1 = random.randint(2, 6)
I2 = random.randint(2, 6)
if I1 == 2:
f1 = np.square
elif I1 == 3:
f1 = cube
elif I1 == 4:
f1 = np.tanh
elif I1 == 5:
f1 = negexp
else:
f1 = np.cos
if I2 == 2:
f2 = np.square
elif I2 == 3:
f2 = cube
elif I2 == 4:
f2 = np.tanh
elif I2 == 5:
f2 = negexp
else:
f2 = np.cos
if debug:
print(f1, f2)
num = size
if dist_z =='gaussian':
cov = np.eye(dz)
mu = np.ones(dz)
Z = np.random.multivariate_normal(mu, cov, num)
Z = np.matrix(Z)
elif dist_z == 'laplace':
Z = np.random.laplace(loc=0.0, scale=1.0, size=num*dz)
Z = np.reshape(Z,(num,dz))
Z = np.matrix(Z)
Ax = np.random.rand(dz, dx)
for i in range(dx):
Ax[:, i] = Ax[:, i] / np.linalg.norm(Ax[:, i], ord=1)
Ax = np.matrix(Ax)
Ay = np.random.rand(dz, dy)
for i in range(dy):
Ay[:, i] = Ay[:, i] / np.linalg.norm(Ay[:, i], ord=1)
Ay = np.matrix(Ay)
Axy = np.random.rand(dx, dy)
for i in range(dy):
Axy[:, i] = Axy[:, i] / np.linalg.norm(Axy[:, i], ord=1)
Axy = np.matrix(Axy)
temp = Z * Ax
m = np.mean(np.abs(temp))
nstd = nstd * m
if sType == 'CI':
X = f1(Z * Ax + nstd * np.random.multivariate_normal(np.zeros(dx), np.eye(dx), num))
Y = f2(Z * Ay + nstd * np.random.multivariate_normal(np.zeros(dy), np.eye(dy), num))
elif sType == 'I':
X = f1(nstd * np.random.multivariate_normal(np.zeros(dx), np.eye(dx), num))
Y = f2(nstd * np.random.multivariate_normal(np.zeros(dy), np.eye(dy), num))
else:
X = np.random.multivariate_normal(np.zeros(dx), np.eye(dx), num)
Y = f2(2 * X * Axy + Z * Ay)
if normalize == True:
Z = (Z - Z.min()) / (Z.max() - Z.min())
X = (X - X.min()) / (X.max() - X.min())
Y = (Y - Y.min()) / (Y.max() - Y.min())
return np.array(X), np.array(Y), np.array(Z)
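# Hedged usage sketch (not part of the original file): draw one conditionally
# independent (CI) batch and one dependent (NI) batch with the function above.
# The returned shapes follow from dx=1, dy=1, dz=20.
def _demo_generate_samples():
    X_ci, Y_ci, Z_ci = generate_samples_random(size=500, sType='CI', dx=1, dy=1, dz=20, seed=0)
    X_ni, Y_ni, Z_ni = generate_samples_random(size=500, sType='NI', dx=1, dy=1, dz=20, seed=1)
    return X_ci.shape, Y_ci.shape, Z_ci.shape  # (500, 1), (500, 1), (500, 20)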
def pc_ks(pvals):
""" Compute the area under power curve and the Kolmogorov-Smirnoff
test statistic of the hypothesis that pvals come from the uniform
distribution with support (0, 1).
"""
if pvals.size == 0:
return [-1, -1]
if -1 in pvals or -2 in pvals:
return [-1, -1]
pvals = np.sort(pvals)
cdf = ecdf(pvals)
auc = 0
for (pv1, pv2) in zip(pvals[:-1], pvals[1:]):
auc += integrate.quad(cdf, pv1, pv2)[0]
auc += integrate.quad(cdf, pvals[-1], 1)[0]
_, ks = kstest(pvals, 'uniform')
return auc, ks
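# Note: pc_ks above relies on an `ecdf` helper and on `integrate.quad` /
# `kstest` from scipy. A minimal empirical-CDF helper compatible with that
# usage could look like this sketch (an assumption, not the original helper):
def ecdf(points):
    """Return the empirical CDF of `points` as a scalar-callable usable by quad."""
    sorted_points = np.sort(np.asarray(points))
    def cdf(v):
        return np.searchsorted(sorted_points, v, side='right') / float(len(sorted_points))
    return cdf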
def np2r(x):
""" Convert a numpy array to an R matrix.
Args:
x (dim0, dim1): A 2d numpy array.
Returns:
x_r: An rpy2 object representing an R matrix isometric to x.
"""
if 'rpy2' not in sys.modules:
        raise ImportError("rpy2 is not installed. "
                          "Cannot convert a numpy array to an R vector.")
try:
dim0, dim1 = x.shape
except IndexError:
raise IndexError("Only 2d arrays are supported")
return R.r.matrix(R.FloatVector(x.flatten()), nrow=dim0, ncol=dim1)
def fdr(truth, pred, axis=None):
""" Computes False discovery rate
"""
return ((pred==1) & (truth==0)).sum(axis=axis) / pred.sum(axis=axis).astype(float).clip(1,np.inf)
def tpr(truth, pred, axis=None):
""" Computes true positive rate
"""
return ((pred==1) & (truth==1)).sum(axis=axis) / truth.sum(axis=axis).astype(float).clip(1,np.inf)
def true_positives(truth, pred, axis=None):
""" Computes number of true positive
"""
return ((pred==1) & (truth==1)).sum(axis=axis)
def false_positives(truth, pred, axis=None):
""" Computes number of false positive
"""
return ((pred==1) & (truth==0)).sum(axis=axis)
def bh(p, fdr):
""" From vector of p-values and desired false positive rate,
returns significant p-values with Benjamini-Hochberg correction
"""
p_orders = np.argsort(p)
discoveries = []
m = float(len(p_orders))
for k, s in enumerate(p_orders):
if p[s] <= (k+1) / m * fdr:
discoveries.append(s)
else:
break
return np.array(discoveries, dtype=int)
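# Hedged usage sketch for bh(): with a 10% target FDR only the smallest
# p-values survive the step-up threshold; the function returns their indices.
def _demo_bh():
    pvals = np.array([0.001, 0.008, 0.039, 0.041, 0.042, 0.060, 0.074, 0.205, 0.212, 0.216])
    return bh(pvals, fdr=0.10)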
def mmd_squared(X, Y, gamma = 1):
X = X.reshape((len(X)), 1)
Y = Y.reshape((len(Y)), 1)
K_XX = rbf_kernel(X, gamma=gamma)
K_YY = rbf_kernel(Y, gamma=gamma)
K_XY = rbf_kernel(X, Y, gamma=gamma)
n = K_XX.shape[0]
m = K_YY.shape[0]
mmd_squared = (np.sum(K_XX)-np.trace(K_XX))/(n*(n-1)) + (np.sum(K_YY)-np.trace(K_YY))/(m*(m-1)) - 2 * np.sum(K_XY) / (m * n)
return mmd_squared
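# Hedged sketch: mmd_squared should be close to zero for two samples drawn from
# the same distribution and clearly larger when the distributions differ.
def _demo_mmd():
    rng = np.random.RandomState(0)
    same = mmd_squared(rng.normal(0, 1, 500), rng.normal(0, 1, 500))
    diff = mmd_squared(rng.normal(0, 1, 500), rng.normal(2, 1, 500))
    return same, diff  # expect same << diff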
def correlation(X,Y):
X = X.reshape((len(X)))
Y = Y.reshape((len(Y)))
return np.abs(np.corrcoef(X, Y)[0, 1])
def kolmogorov(X,Y):
X = X.reshape((len(X)))
Y = Y.reshape((len(Y)))
return ks_2samp(X, Y)[0]
def wilcox(X,Y):
X = X.reshape((len(X)))
Y = Y.reshape((len(Y)))
return wilcoxon(X, Y)[0]
'''
X = np.random.normal(0,2,500)
Y = np.random.normal(0,2,500)
kolmogorov(X,Y)
'''
def rdc(x, y, f=np.sin, k=20, s=1/6., n=1):
"""
Computes the Randomized Dependence Coefficient
x,y: numpy arrays 1-D or 2-D
If 1-D, size (samples,)
If 2-D, size (samples, variables)
f: function to use for random projection
k: number of random projections to use
s: scale parameter
n: number of times to compute the RDC and
return the median (for stability)
According to the paper, the coefficient should be relatively insensitive to
the settings of the f, k, and s parameters.
Source: https://github.com/garydoranjr/rdc
"""
x = x.reshape((len(x)))
y = y.reshape((len(y)))
if n > 1:
values = []
for i in range(n):
try:
values.append(rdc(x, y, f, k, s, 1))
except np.linalg.linalg.LinAlgError: pass
return np.median(values)
if len(x.shape) == 1: x = x.reshape((-1, 1))
if len(y.shape) == 1: y = y.reshape((-1, 1))
# Copula Transformation
cx = np.column_stack([rankdata(xc, method='ordinal') for xc in x.T])/float(x.size)
cy = np.column_stack([rankdata(yc, method='ordinal') for yc in y.T])/float(y.size)
# Add a vector of ones so that w.x + b is just a dot product
O = np.ones(cx.shape[0])
X = np.column_stack([cx, O])
Y = np.column_stack([cy, O])
# Random linear projections
Rx = (s/X.shape[1])*np.random.randn(X.shape[1], k)
Ry = (s/Y.shape[1])*np.random.randn(Y.shape[1], k)
X = np.dot(X, Rx)
Y = np.dot(Y, Ry)
# Apply non-linear function to random projections
fX = f(X)
fY = f(Y)
# Compute full covariance matrix
C = np.cov(np.hstack([fX, fY]).T)
# Due to numerical issues, if k is too large,
# then rank(fX) < k or rank(fY) < k, so we need
# to find the largest k such that the eigenvalues
# (canonical correlations) are real-valued
k0 = k
lb = 1
ub = k
while True:
# Compute canonical correlations
Cxx = C[:k, :k]
Cyy = C[k0:k0+k, k0:k0+k]
Cxy = C[:k, k0:k0+k]
Cyx = C[k0:k0+k, :k]
        eigs = np.linalg.eigvals(np.dot(np.dot(np.linalg.pinv(Cxx), Cxy), np.dot(np.linalg.pinv(Cyy), Cyx)))
import unittest
from datetime import date
from irLib.marketConvention.dayCount import ACT_ACT
from irLib.marketConvention.compounding import annually_k_Spot
from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve
import numpy as np
alias_disC = 'disC'
alias_forC = 'forC'
referenceDate = date(2020, 6, 26)
dayCount = ACT_ACT()
compounding = annually_k_Spot()
allowExtrapolation = False
# set synthetic data
timeIndex = [1, 2, 3, 4, 5]
flatR = 0.03
dF = ((flatR + 1) ** -np.arange(1, 6)).tolist()
forwardRates = (flatR * np.ones(5)).tolist()
spots = (flatR * np.ones(5)).tolist()
yearFrac = np.arange(1, 6).tolist()
par = (flatR * np.ones(5)).tolist()
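# Hedged consistency check (illustrative only): for a flat curve at rate flatR,
# each discount factor equals (1 + flatR) ** -t and the one-period forward rates
# implied by consecutive discount factors equal the spot rate.
assert np.allclose(dF, [(1 + flatR) ** -t for t in timeIndex])
assert np.allclose(np.array(dF[:-1]) / np.array(dF[1:]) - 1, flatR)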
import os
import numpy as np
import scipy.sparse as sp
import pickle
import torch
from torch.utils.data import DataLoader
from dgl.data.utils import download, _get_dgl_url, get_download_dir, extract_archive
import random
import time
import dgl
from utils import shuffle_walks
#np.random.seed(3141592653)
def ReadTxtNet(file_path="", undirected=True):
""" Read the txt network file.
Notations: The network is unweighted.
Parameters
----------
file_path str : path of network file
undirected bool : whether the edges are undirected
Return
------
net dict : a dict recording the connections in the graph
node2id dict : a dict mapping the nodes to their embedding indices
id2node dict : a dict mapping nodes embedding indices to the nodes
"""
if file_path == 'youtube' or file_path == 'blog':
name = file_path
dir = get_download_dir()
zip_file_path='{}/{}.zip'.format(dir, name)
download(_get_dgl_url(os.path.join('dataset/DeepWalk/', '{}.zip'.format(file_path))), path=zip_file_path)
extract_archive(zip_file_path,
'{}/{}'.format(dir, name))
file_path = "{}/{}/{}-net.txt".format(dir, name, name)
node2id = {}
id2node = {}
cid = 0
src = []
dst = []
net = {}
with open(file_path, "r") as f:
for line in f.readlines():
n1, n2 = list(map(int, line.strip().split(" ")[:2]))
if n1 not in node2id:
node2id[n1] = cid
id2node[cid] = n1
cid += 1
if n2 not in node2id:
node2id[n2] = cid
id2node[cid] = n2
cid += 1
n1 = node2id[n1]
n2 = node2id[n2]
if n1 not in net:
net[n1] = {n2: 1}
src.append(n1)
dst.append(n2)
elif n2 not in net[n1]:
net[n1][n2] = 1
src.append(n1)
dst.append(n2)
if undirected:
if n2 not in net:
net[n2] = {n1: 1}
src.append(n2)
dst.append(n1)
elif n1 not in net[n2]:
net[n2][n1] = 1
src.append(n2)
dst.append(n1)
print("node num: %d" % len(net))
print("edge num: %d" % len(src))
assert max(net.keys()) == len(net) - 1, "error reading net, quit"
sm = sp.coo_matrix(
(np.ones(len(src)), (src, dst)),
dtype=np.float32)
return net, node2id, id2node, sm
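# Hedged usage sketch for ReadTxtNet (assumes a writable temporary path; the
# parser expects one whitespace-separated integer pair per line).
def _demo_read_txt_net(tmp_path="/tmp/toy-net.txt"):
    with open(tmp_path, "w") as f:
        f.write("0 1\n1 2\n2 0\n")
    net, node2id, id2node, sm = ReadTxtNet(tmp_path, undirected=True)
    return len(net), sm.shape  # 3 nodes, 3 x 3 sparse adjacency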
def net2graph(net_sm):
""" Transform the network to DGL graph
Return
------
G DGLGraph : graph by DGL
"""
start = time.time()
G = dgl.DGLGraph(net_sm)
end = time.time()
t = end - start
print("Building DGLGraph in %.2fs" % t)
return G
class DeepwalkDataset:
def __init__(self,
net_file,
map_file,
walk_length=80,
window_size=5,
num_walks=10,
batch_size=32,
negative=5,
gpus=[0],
fast_neg=True,
):
""" This class has the following functions:
1. Transform the txt network file into DGL graph;
2. Generate random walk sequences for the trainer;
3. Provide the negative table if the user hopes to sample negative
nodes according to nodes' degrees;
        Parameters
        ----------
net_file str : path of the txt network file
walk_length int : number of nodes in a sequence
window_size int : context window size
num_walks int : number of walks for each node
batch_size int : number of node sequences in each batch
        negative int : negative samples for each positive node pair
fast_neg bool : whether do negative sampling inside a batch
"""
self.walk_length = walk_length
self.window_size = window_size
self.num_walks = num_walks
self.batch_size = batch_size
self.negative = negative
self.num_procs = len(gpus)
self.fast_neg = fast_neg
self.net, self.node2id, self.id2node, self.sm = ReadTxtNet(net_file)
self.save_mapping(map_file)
self.G = net2graph(self.sm)
# random walk seeds
start = time.time()
seeds = torch.cat([torch.LongTensor(self.G.nodes())] * num_walks)
self.seeds = torch.split(shuffle_walks(seeds), int(np.ceil(len(self.net) * self.num_walks / self.num_procs)), 0)
end = time.time()
t = end - start
print("%d seeds in %.2fs" % (len(seeds), t))
# negative table for true negative sampling
if not fast_neg:
node_degree = np.array(list(map(lambda x: len(self.net[x]), self.net.keys())))
node_degree = np.power(node_degree, 0.75)
node_degree /= np.sum(node_degree)
node_degree = np.array(node_degree * 1e8, dtype=np.int)
self.neg_table = []
for idx, node in enumerate(self.net.keys()):
self.neg_table += [node] * node_degree[idx]
self.neg_table_size = len(self.neg_table)
            self.neg_table = np.array(self.neg_table, dtype=np.long)
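# Hedged sketch (not from the original file): the negative table built above is
# typically sampled uniformly at random, which reproduces the degree ** 0.75
# unigram distribution commonly used for negative sampling. `dataset` below is
# assumed to be a DeepwalkDataset instance created with fast_neg=False.
def _sample_negatives(dataset, count):
    idx = np.random.randint(0, dataset.neg_table_size, size=count)
    return dataset.neg_table[idx]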
import os
from typing import Any, Union
import logging
import math
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
import cv2
from ipso_phen.ipapi.file_handlers.fh_base import file_handler_factory
from ipso_phen.ipapi.base.image_wrapper import ImageWrapper
import ipso_phen.ipapi.base.ip_common as ipc
from ipso_phen.ipapi.tools.comand_line_wrapper import ArgWrapper
from ipso_phen.ipapi.tools.regions import (
CircleRegion,
RectangleRegion,
EmptyRegion,
Point,
AbstractRegion,
)
from ipso_phen.ipapi.tools.common_functions import time_method, force_directories
matplotlib.use("agg")
KLC_FULLY_INSIDE = dict(val=0, color=ipc.C_GREEN)
KLC_OVERLAPS = dict(val=1, color=ipc.C_BLUE)
KLC_PROTECTED_DIST_OK = dict(val=2, color=ipc.C_LIGHT_STEEL_BLUE)
KLC_PROTECTED_SIZE_OK = dict(val=3, color=ipc.C_CABIN_BLUE)
KLC_OK_TOLERANCE = dict(val=4, color=ipc.C_TEAL)
KLC_NO_BIG_ENOUGH = dict(val=5, color=ipc.C_FUCHSIA)
KLC_NO_CLOSE_ENOUGH = dict(val=6, color=ipc.C_ORANGE)
KLC_OUTSIDE = dict(val=7, color=ipc.C_RED)
KLC_BIG_ENOUGH_TO_IGNORE_DISTANCE = dict(val=8, color=ipc.C_LIME)
logger = logging.getLogger(__name__)
class ImageListHolder:
def __init__(self, file_list, database):
self.image_list = [
file_handler_factory(file_path_, database=database)
for file_path_ in file_list
]
def __len__(self):
return len(self.image_list)
def retrieve_image(self, key, value):
"""Return an image based on key value
:param key: one of the wrapper properties
:param value: value
:return: image
"""
for fh in self.image_list:
if value in fh.value_of(key):
return fh.load_source_file()
return None
class BaseImageProcessor(ImageWrapper):
process_dict = None
def __init__(
self,
file_path: str,
options: ArgWrapper = None,
database=None,
scale_factor=1,
) -> None:
super().__init__(file_path, database)
if options is None:
self._options = {}
else:
self._options = dict(options.__dict__)
self.target_database = database
self.scale_factor = scale_factor
self._source_image = None
self._current_image = None
self.mask = None
self.lock = False
self.image_list = []
self.forced_storage_images_list = []
self._rois_list = []
self._mosaic_data = None
self.data_output = {}
self.good_image = False
self.owner = None
self.msp_images_holder = None
self._built_channels = {}
self.csv_data_holder = self.init_csv_writer()
def init_data_holder(self):
self.csv_data_holder.clear()
self.csv_data_holder.update_csv_value("experiment", self.experiment)
self.csv_data_holder.update_csv_value("plant", self.plant)
self.csv_data_holder.update_csv_value("date_time", self.date_time)
self.csv_data_holder.update_csv_value("camera", self.camera)
self.csv_data_holder.update_csv_value("view_option", self.view_option)
def reset(self):
if self.lock:
return
self.init_data_holder()
self._rois_list = []
self.image_list = []
self.data_output = {}
self._mosaic_data = None
self.store_mosaic = "none"
self._current_image = self.source_image
def init_csv_writer(self):
"""Creates a csv writer with the variables specified in the class
child classes should override this method
:return: Csv writer
"""
return ipc.AbstractCsvWriter()
@staticmethod
def can_process(dict_data: dict) -> bool:
"""
Checks if the class can process the image
        :param dict_data: Dictionary containing filter data
:return: True if current class can process data
"""
return False
def load_source_image(self, store_source=False):
"""
Loads source image and applies corrections if needed
:param store_source: if true image will be stores in image_list
:return:numpy array -- Fixed source image
"""
src_img = self.file_handler.load_source_file()
self.good_image = src_img is not None
if self.good_image:
src_img = self._fix_source_image(src_img)
if self.scale_factor != 1:
src_img = ipc.resize_image(
src_img=src_img,
width=round(src_img.shape[1] * self.scale_factor),
height=round(src_img.shape[0] * self.scale_factor),
keep_aspect_ratio=False,
output_as_bgr=False,
)
if store_source:
self.store_image(src_img, "source")
else:
logger.error("Unable to load source image")
return src_img
def retrieve_msp_images(self):
"""On first call builds the wrappers corresponding to MSP images linked to observation
:return: Number of MSP images available
"""
if self.msp_images_holder is None:
self.msp_images_holder = ImageListHolder(
self.file_handler.linked_images,
self.target_database,
)
if self.msp_images_holder is None:
return 0
else:
return len(self.msp_images_holder)
@staticmethod
def draw_text(
img: Any,
text: str,
fnt_color: tuple = ipc.C_RED,
background_color: Union[None, tuple] = None,
) -> None:
"""Draw text into img, always draws on bottom left portion of the image
Modifies source image
:param img: target image
:param text: text
"""
fnt_face = cv2.FONT_HERSHEY_DUPLEX
fnt_scale = img.shape[0] / 1000
fnt_thickness = max(round(img.shape[0] / 1080), 1)
y = img.shape[0] - 20
for line in reversed(list(text.split("\n"))):
text_size, _ = cv2.getTextSize(line, fnt_face, fnt_scale, fnt_thickness)
if background_color is not None:
cv2.rectangle(
img,
(10, y - text_size[1] - 4),
(text_size[0], y + 4),
background_color,
-1,
)
cv2.putText(
img,
line,
(10, y),
fnt_face,
fnt_scale,
fnt_color,
fnt_thickness,
cv2.LINE_AA,
)
y -= text_size[1] + 8
def draw_image(self, **kwargs):
"""Build pseudo color image
Keyword Arguments:
* normalize_before: Normalize channel
* src_image: Source image
* channel: Channel to transform into pseudo color
* src_mask: Mask
* background: Background selection either 'img' or color tuple
* color_map: color map used
* roi: ignore everything outside ROI
* contour_thickness
* hull_thickness
* straight_bounding_rec_thickness
* enclosing_circle_thickness
* centroid_width
* height_thickness
* width_thickness
:return: drawn image
"""
src = kwargs.get("src_image", self.current_image)
mask = kwargs.get("src_mask", self.mask)
obj = kwargs.get("objects", None)
channel = kwargs.get("channel", "l")
background = kwargs.get("background", "source")
if background == "color":
background = kwargs.get("bcg_color", ipc.C_BLACK)
foreground = kwargs.get("foreground", "source")
if foreground == "color":
foreground = kwargs.get("fore_color", ipc.C_WHITE)
bck_grd_luma = kwargs.get("bck_grd_luma", 100)
normalize_before = bool(kwargs.get("normalize_before", False))
color_map = kwargs.get("color_map", ipc.DEFAULT_COLOR_MAP)
if isinstance(color_map, str):
_, color_map = color_map.split("_")
color_map = int(color_map)
roi = kwargs.get("roi", None)
contour_thickness = kwargs.get("contour_thickness", 0)
hull_thickness = kwargs.get("hull_thickness", 0)
bounding_rec_thickness = kwargs.get("bounding_rec_thickness", 0)
straight_bounding_rec_thickness = kwargs.get("straight_bounding_rec_thickness", 0)
enclosing_circle_thickness = kwargs.get("enclosing_circle_thickness", 0)
centroid_width = kwargs.get("centroid_width", 0)
centroid_line_width = kwargs.get("centroid_line_width", 4)
height_thickness = kwargs.get("height_thickness", 0)
width_thickness = kwargs.get("width_thickness", 0)
# Apply roi to mask
if (roi is not None) and (mask is not None):
mask_ = roi.keep(mask)
elif mask is not None:
mask_ = mask.copy()
else:
mask_ = None
fnt = (cv2.FONT_HERSHEY_SIMPLEX, 0.6)
fnt_scale = src.shape[0] / 1000
fnt_thickness = max(round(src.shape[1] / 1080) * 2, 1)
# Build foreground
if len(src.shape) == 2 or (len(src.shape) == 3 and src.shape[2] == 1):
foreground_img = np.dstack((src, src, src))
elif isinstance(foreground, tuple):
foreground_img = np.full(src.shape, foreground, np.uint8)
elif foreground == "source":
foreground_img = src.copy()
elif foreground == "false_colour":
if isinstance(channel, str):
c = self.get_channel(src, channel)
else:
c = channel.copy()
if mask_ is not None:
c = cv2.bitwise_and(c, c, mask=mask_)
if normalize_before:
c = cv2.equalizeHist(c)
foreground_img = cv2.applyColorMap(c, color_map)
elif foreground == "bw":
foreground_img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
foreground_img = np.dstack((foreground_img, foreground_img, foreground_img))
        elif isinstance(foreground, str):
            foreground_img = np.full(src.shape, ipc.all_colors_dict[foreground], np.uint8)
        else:
            logger.error(f"Unknown foreground {foreground}")
            return np.full(src.shape, ipc.C_FUCHSIA, np.uint8)
if mask_ is None:
img = foreground_img.copy()
else:
# Build background
if background == "white":
background_img = np.full(foreground_img.shape, ipc.C_WHITE, np.uint8)
elif background == "black":
background_img = np.full(foreground_img.shape, ipc.C_BLACK, np.uint8)
elif background == "silver":
background_img = np.full(foreground_img.shape, ipc.C_SILVER, np.uint8)
elif background == "bw":
background_img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
background_img = np.dstack(
(background_img, background_img, background_img)
)
elif background == "source":
if len(src.shape) == 2 or (len(src.shape) == 3 and src.shape[2] == 1):
background_img = np.dstack((src, src, src))
else:
background_img = np.copy(src)
elif isinstance(background, tuple):
if len(background) == 3:
background_img = np.full(foreground_img.shape, background, np.uint8)
elif len(background) == 1:
background_img = np.full(
foreground_img.shape,
(background, background, background),
np.uint8,
)
else:
logger.error(f"Unknown color: {background}")
return np.full(foreground_img.shape, ipc.C_FUCHSIA, np.uint8)
else:
logger.error(f"Unknown background {background}")
return np.full(foreground_img.shape, ipc.C_FUCHSIA, np.uint8)
if bck_grd_luma != 100:
bck_grd_luma /= 100
lum, a, b = cv2.split(cv2.cvtColor(background_img, cv2.COLOR_BGR2LAB))
lum = (lum * bck_grd_luma).astype(np.uint)
lum[lum >= 255] = 255
lum = lum.astype(np.uint8)
background_img = cv2.merge((lum, a, b))
background_img = cv2.cvtColor(background_img, cv2.COLOR_LAB2BGR)
# Merge foreground & background
foreground_img = cv2.bitwise_and(foreground_img, foreground_img, mask=mask_)
background_img = cv2.bitwise_and(
background_img, background_img, mask=255 - mask_
)
img = cv2.bitwise_or(foreground_img, background_img)
# Draw contour
if (np.count_nonzero(mask_) > 0) and (
(contour_thickness > 0)
or (hull_thickness > 0)
or (bounding_rec_thickness > 0)
or (straight_bounding_rec_thickness > 0)
or (enclosing_circle_thickness > 0)
or (centroid_width > 0)
or (height_thickness > 0)
or (width_thickness > 0)
):
if obj is None:
id_objects, obj_hierarchy = ipc.get_contours_and_hierarchy(
mask=mask_,
retrieve_mode=cv2.RETR_TREE,
method=cv2.CHAIN_APPROX_NONE,
)
obj, _ = self.object_composition(img, id_objects, obj_hierarchy)
if contour_thickness > 0:
cv2.drawContours(img, obj, -1, ipc.C_FUCHSIA, contour_thickness)
if hull_thickness > 0:
hull = cv2.convexHull(obj)
cv2.drawContours(img, [hull], 0, ipc.C_LIME, hull_thickness)
if bounding_rec_thickness > 0:
rect = cv2.minAreaRect(obj)
box = cv2.boxPoints(rect)
box = np.int0(box)
cv2.drawContours(img, [box], 0, ipc.C_RED, bounding_rec_thickness)
if straight_bounding_rec_thickness > 0:
x, y, w, h = cv2.boundingRect(obj)
cv2.rectangle(
img, (x, y), (x + w, y + h), ipc.C_PURPLE, bounding_rec_thickness
)
if enclosing_circle_thickness > 0:
(x, y), radius = cv2.minEnclosingCircle(obj)
center = (int(x), int(y))
radius = int(radius)
cv2.circle(
img, center, radius, ipc.C_YELLOW, enclosing_circle_thickness
)
if (
(centroid_width > 0)
or (height_thickness > 0)
or (width_thickness > 0)
):
moments = cv2.moments(mask_, binaryImage=True)
if moments["m00"] != 0:
cmx, cmy = (
moments["m10"] / moments["m00"],
moments["m01"] / moments["m00"],
)
x_, y_, width_, height_ = cv2.boundingRect(obj)
if height_thickness > 0:
cv2.line(
img,
(int(cmx), y_),
(int(cmx), y_ + height_),
ipc.C_CYAN,
height_thickness,
)
if width_thickness > 0:
cv2.line(
img,
(x_, int(cmy)),
(x_ + width_, int(cmy)),
ipc.C_CYAN,
width_thickness,
)
if centroid_width > 0:
cv2.circle(
img,
(int(cmx), int(cmy)),
centroid_width,
ipc.C_BLUE,
centroid_line_width,
)
if bool(kwargs.get("cy_num", False)) is True:
cv2.line(
img,
(int(cmx), 0),
(int(cmx), int(cmy)),
ipc.C_BLUE,
centroid_line_width,
)
cv2.putText(
img,
f"cy: {cmy:.2f}",
(int(cmx) + 5, int(cmy / 2)),
fnt[0],
fnt_scale,
ipc.C_BLUE,
fnt_thickness,
)
if roi is not None:
src_ = src.copy()
src_[roi.top : roi.bottom, roi.left : roi.right] = img[
roi.top : roi.bottom, roi.left : roi.right
]
return src_
else:
return img
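    # Hedged usage note (not part of the original class): a typical draw_image
    # call relies only on the documented keyword arguments, e.g.
    #     overlay = processor.draw_image(
    #         src_image=processor.current_image,
    #         src_mask=processor.mask,
    #         foreground="false_colour",
    #         channel="h",
    #         background="bw",
    #         contour_thickness=4,
    #         centroid_width=10,
    #     )
    # where `processor` is a hypothetical BaseImageProcessor instance; omitted
    # kwargs fall back to the defaults read at the top of the method.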
def store_image(
self,
image: Any,
text: str,
rois: Any = (),
mosaic_list: list = None,
text_overlay: Any = False,
force_store: bool = False,
font_color: tuple = ipc.C_RED,
**kwargs,
) -> dict:
"""
Store image for debug or result
:param image: Source image
:param text: Image name, used as key
:param rois: Regions of interest, will be printed on image
:param mosaic_list: if present add image name to list
:param text_overlay: Text to be printed on top of stored image
:param force_store: Bypass storage options
:return: Dictionary containing stored image data
"""
if self.owner is not None:
target = self.owner
text = f"{self.view_option}_{text}"
else:
target = self
for dic in target.image_list:
if dic["name"].lower() == text:
target.image_list.remove(dic)
if (
(text and (target.store_images or (text.lower() == "mosaic")))
or force_store
or (text in self.forced_storage_images_list)
):
# Create dummy image if issue
if image is not None:
cp = image.copy()
else:
cp = np.full((100, 100, 3), ipc.C_FUCHSIA, np.uint8)
# Ensure image is 3D
if (rois or text_overlay) and (
len(cp.shape) == 2 or (len(cp.shape) == 3 and cp.shape[2] == 1)
):
cp = np.dstack((cp, cp, cp))
# Print ROIs if needed
if isinstance(rois, bool) and rois:
rois = self.rois_list
if rois:
for roi in rois:
cp = roi.draw_to(cp, line_width=2)
# Print text if needed
if isinstance(text_overlay, str):
self.draw_text(
img=cp,
text=text_overlay,
fnt_color=font_color,
)
elif text_overlay:
self.draw_text(
img=cp,
text=text.replace(", ", "\n"),
fnt_color=font_color,
)
new_dict = dict(name=text, image=cp, written=False)
target.image_list.append(new_dict)
if target.write_images == "plot":
target.plot_image(img_dict=new_dict, destroy_window=True)
if mosaic_list is not None and text:
mosaic_list.append(text)
return new_dict
else:
return dict()
def image_path_from_name(self, name: str):
"""Calculates target image saving path from name
:param name: name of the image in the dictionary cf. image_list
:return: destination file path
"""
image_dict = self.retrieve_image_dict(name)
if image_dict is not None:
return self.image_storage_path(image_dict)
else:
return ""
def image_storage_path(
self,
dic: dict,
index: str = "",
ext: str = "jpg",
is_force_output: bool = False,
is_force_fullname: bool = False,
) -> str:
"""returns path to which an image will be written
Arguments:
dic {dictionary} -- image info: 'mode', 'text', 'image'
Keyword Arguments:
index {str} -- image index to be printed as a prefix (default: {''})
ext {str} -- extension format (default: {'jpg'})
Returns:
str -- destination file path
"""
store_ = is_force_output or self.is_store_image(dic["name"])
if not store_:
return ""
if dic["name"] == "use_source_name":
return f"{self.dst_path}{self.name}.{ext}"
if is_force_fullname:
if index:
return f'{self.dst_path}{index}_{self.name}_{dic["name"]}.{ext}'
else:
return f'{self.dst_path}{self.name}_{dic["name"]}.{ext}'
else:
if self.is_plot_image(dic["name"]):
return ""
elif self.is_save_image(dic["name"]):
if dic["name"] == "mosaic":
tmp_path = "{}{}".format(self.dst_path, "mosaics")
tmp_path = os.path.join(tmp_path, "")
force_directories(tmp_path)
return "{}{}.jpg".format(tmp_path, self.name)
else:
tmp_path = f"{self.dst_path}{self.name}"
tmp_path = os.path.join(tmp_path, "")
force_directories(tmp_path)
return f'{tmp_path}{index}_{dic["name"]}.{ext}'
else:
return ""
def save_image(self, img: Any, idx: int = -1) -> None:
if idx >= 0:
str_idx = str(idx)
else:
str_idx = ""
tmp_path = self.image_storage_path(img, index=str_idx)
if tmp_path:
cv2.imwrite(tmp_path, img["image"])
img["written"] = True
def plot_image(self, img_dict: dict, destroy_window: bool = False) -> bool:
res = False
try:
cv2.imshow(
img_dict["name"],
ipc.resize_image(
img_dict["image"].copy(),
target_rect=RectangleRegion(left=0, width=800, top=0, height=600),
keep_aspect_ratio=True,
),
)
cv2.waitKey(0)
if destroy_window:
cv2.destroyAllWindows()
except Exception as e:
logger.exception(f'Unable to plot {img_dict["name"]}: "{repr(e)}"')
res = False
else:
res = True
finally:
return res
def print_image(self, img: Any, idx: int = -1) -> None:
"""
Print image according to options
:param img: numpy array
:param idx: int
:return:
"""
if img["written"] is not True:
if self.is_plot_image(img["name"]):
self.plot_image(img_dict=img)
elif self.is_save_image(img["name"]):
self.save_image(img=img, idx=idx)
def retrieve_image_dict(self, dict_name: str):
"""Retrieve image dictionary from the name key
:rtype: bool, dict
:param dict_name: key
:return: success, image dictionary
"""
if dict_name.lower() == "":
return None
else:
for dic in self.image_list:
if dic["name"].lower() == dict_name.lower():
return dic
if dict_name.lower() == "source":
return self.store_image(self.source_image, "source")
if dict_name.lower() == "mask":
return self.store_image(self.mask, "mask")
return None
    def print_mosaic(self, padding: int = 2):
if (self.store_mosaic.lower() != "none") or (self.write_mosaic.lower() != "none"):
if self._mosaic_data is None:
if self.store_mosaic.lower() == "debug":
self._mosaic_data = np.array(
["source", "img_wth_tagged_cnt", "shapes"]
)
elif self.store_mosaic.lower() == "result":
img_lst = ["!n", "!n", "!n", "!n"]
available_images = [dic["name"] for dic in self.image_list]
if "true_source_image" in available_images:
img_lst[0] = "true_source_image"
else:
img_lst[0] = "source"
if "mask" in available_images:
img_lst[1] = "mask"
if "pseudo_on" in available_images:
img_lst[2] = "pseudo_on"
if "shapes" in available_images:
img_lst[3] = "shapes"
for img_name in reversed(available_images):
if img_lst.count("!n") == 0:
break
if not (img_name in img_lst) and (img_name != "histogram"):
try:
idx = len(img_lst) - 1 - img_lst[::-1].index("!n")
except ValueError as _:
break
img_lst[idx] = img_name
self._mosaic_data = np.array(
[[img_lst[0], img_lst[1]], [img_lst[2], img_lst[3]]]
)
else:
raise NotImplementedError
try:
canvas = self.build_mosaic(padding=padding)
mosaic_ = self.store_image(canvas, "mosaic")
self.print_image(mosaic_)
except Exception as e:
# Unsupported format detected
logger.exception(
'Exception: "{}" - Image: "{}", unsupported mosaic'.format(
repr(e), str(self)
)
)
def print_images(self):
"""Prints images to disc according to options and selection
Keyword Arguments:
selection {list} -- List of image names to be printed (default: {[]})
"""
if self.write_images == "print":
for i, img in enumerate(self.image_list):
if img["written"] is True:
continue
self.print_image(img, i + 1)
self.print_mosaic()
def avg_brightness_contrast(
self, img: Any = None, mode: str = "std", mask=None
) -> tuple:
"""Calculates average brightness using one of 3 available methods
* std='standard, objective'
* p1='perceived option 1
* p2='perceived option 2, slower to calculate
:param mask:
:param img:
:param mode: std, p1 or p2
:return: mean, std_dev
"""
if img is None:
img = self.current_image
b, g, r = cv2.split(img)
if mode == "std":
c = r * 0.2126 + g * 0.7152 + b * 0.0722
elif mode == "p1":
c = r * 0.299 + g * 0.587 + b * 0.114
elif mode == "p2":
c = np.sqrt(
0.241 * np.power(r.astype(np.float), 2)
+ 0.691 * np.power(g.astype(np.float), 2)
+ 0.068 * np.power(b.astype(np.float), 2)
)
else:
logger.error("Unknown average calculation mode")
return 0, 0
if mask is None:
tmp_tuple = cv2.meanStdDev(c.reshape(c.shape[1] * c.shape[0]))
else:
tmp_tuple = cv2.meanStdDev(
c.reshape(c.shape[1] * c.shape[0]),
mask=mask.reshape(mask.shape[1] * mask.shape[0]),
)
return tmp_tuple[0][0][0], tmp_tuple[1][0][0]
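    # Hedged worked example for the three modes above, for a single BGR pixel
    # (b, g, r) = (64, 128, 200) -- values chosen purely for illustration:
    #     std: 200 * 0.2126 + 128 * 0.7152 + 64 * 0.0722  ~ 138.69
    #     p1 : 200 * 0.299  + 128 * 0.587  + 64 * 0.114   ~ 142.23
    #     p2 : sqrt(0.241 * 200**2 + 0.691 * 128**2 + 0.068 * 64**2) ~ 145.74
    # The mode only changes how the luma channel is built; cv2.meanStdDev is
    # then applied to that channel (optionally under the mask).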
def object_composition(self, img: Any, contours: Any, hierarchy: Any):
"""From PlantCV: Groups objects into a single object, usually done after object filtering.
Inputs:
contours = object list
Returns:
group = grouped contours list
mask = image mask
:param img: numpy array
:param contours: list
:param hierarchy: list
:return group: list
:return mask: numpy array
"""
ori_img = np.copy(img)
if len(ori_img.shape) == 2:
ori_img = np.dstack((ori_img, ori_img, ori_img))
mask = np.zeros_like(a=ori_img, dtype=np.uint8)
else:
mask = np.zeros_like(a=ori_img[:, :, 0], dtype=np.uint8)
stack = np.zeros((len(contours), 1))
for c, cnt in enumerate(contours):
# if hierarchy[0][c][3] == -1:
if hierarchy[0][c][2] == -1 and hierarchy[0][c][3] > -1:
stack[c] = 0
else:
stack[c] = 1
ids = np.where(stack == 1)[0]
if len(ids) > 0:
group = np.vstack([contours[i] for i in ids])
cv2.drawContours(mask, contours, -1, 255, -1, hierarchy=hierarchy)
if self.store_images:
dbg_img = self.draw_image(
src_image=ori_img, src_mask=mask, background="bw", foreground="source"
)
cv2.drawContours(dbg_img, group, -1, (255, 0, 0), 6)
for cnt in contours:
cv2.drawContours(dbg_img, cnt, -1, (255, 0, 255), 4)
self.store_image(dbg_img, "objcomp")
return group, mask
else:
logger.error(f"Warning: {repr(self.name)} Invalid contour.")
return None, None
@time_method
def analyze_object(self, img: Any, mask: Any):
"""Outputs numeric properties for an input object (contour or grouped contours).
Inputs:
img = image object (most likely the original), color(RGB)
obj = single or grouped contour object
mask = binary image to use as mask for moments analysis
debug = None, print, or plot. Print = save to file, Plot = print to screen.
Returns:
shape_header = shape data table headers
shape_data = shape data table values
:param img: numpy array
:param obj: list
:param mask: numpy array
:return:
"""
obj, mask = self.prepare_analysis(
self.draw_image(src_mask=mask, background="silver", foreground="source"), mask
)
# Valid objects can only be analyzed if they have >= 5 vertices
if len(obj) < 5:
return None, None, None
ori_img = np.copy(img)
hull = cv2.convexHull(obj)
m = cv2.moments(mask, binaryImage=True)
area = m["m00"]
if area:
# x and y position (bottom left?) and extent x (width) and extent y (height)
x, y, width, height = cv2.boundingRect(obj)
# Centroid (center of mass x, center of mass y)
cmx, cmy = (m["m10"] / m["m00"], m["m01"] / m["m00"])
# Store Shape Data
self.csv_data_holder.update_csv_value("area", area)
if self.csv_data_holder.has_csv_key("centroid"):
self.csv_data_holder.update_csv_value("centroid_x", cmx, force_pair=True)
self.csv_data_holder.update_csv_value("centroid_y", cmy, force_pair=True)
self.csv_data_holder.data_list.pop("centroid", None)
hull_area = cv2.contourArea(hull)
self.csv_data_holder.update_csv_value("hull_area", hull_area)
self.csv_data_holder.update_csv_value("shape_solidity", area / hull_area)
x, y, w, h = cv2.boundingRect(obj)
self.csv_data_holder.update_csv_value("shape_extend", float(area) / (w * h))
if self.csv_data_holder.has_csv_key("straight_bounding_rectangle"):
self.csv_data_holder.update_csv_value(
"straight_bounding_rectangle_left", x, force_pair=True
)
self.csv_data_holder.update_csv_value(
"straight_bounding_rectangle_width", w, force_pair=True
)
self.csv_data_holder.update_csv_value(
"straight_bounding_rectangle_top", y, force_pair=True
)
self.csv_data_holder.update_csv_value(
"straight_bounding_rectangle_height", h, force_pair=True
)
self.csv_data_holder.data_list.pop("straight_bounding_rectangle", None)
straight_bounding_rec_thickness = 4
else:
straight_bounding_rec_thickness = 0
if self.csv_data_holder.has_csv_key("rotated_bounding_rectangle"):
(x, y), (w, h), r = cv2.minAreaRect(obj)
wl = max(w, h)
hl = min(w, h)
self.csv_data_holder.update_csv_value(
key="rotated_bounding_rectangle_cx", value=x, force_pair=True
)
self.csv_data_holder.update_csv_value(
key="rotated_bounding_rectangle_cy", value=y, force_pair=True
)
self.csv_data_holder.update_csv_value(
key="rotated_bounding_rectangle_width", value=wl, force_pair=True
)
self.csv_data_holder.update_csv_value(
key="rotated_bounding_rectangle_height", value=hl, force_pair=True
)
self.csv_data_holder.update_csv_value(
key="rotated_bounding_rectangle_rotation",
value=r + 180,
force_pair=True,
)
self.csv_data_holder.data_list.pop("rotated_bounding_rectangle", None)
bounding_rec_thickness = 4
else:
bounding_rec_thickness = 0
if self.csv_data_holder.has_csv_key("minimum_enclosing_circle"):
(x, y), radius = cv2.minEnclosingCircle(obj)
self.csv_data_holder.update_csv_value(
"minimum_enclosing_circle_cx", x, force_pair=True
)
self.csv_data_holder.update_csv_value(
"minimum_enclosing_circle_cy", y, force_pair=True
)
self.csv_data_holder.update_csv_value(
"minimum_enclosing_circle_radius", radius, force_pair=True
)
self.csv_data_holder.data_list.pop("minimum_enclosing_circle", None)
enclosing_circle_thickness = 4
else:
enclosing_circle_thickness = 0
self.csv_data_holder.update_csv_value("shape_height", height)
if self.csv_data_holder.has_csv_key("width_data"):
mask_data = ipc.MaskData(mask)
_, _, _, min_, max_, avg_, std_ = mask_data.width_quantile_stats(
1, 0, tag=0
)
self.csv_data_holder.update_csv_value(
"shape_width", width, force_pair=True
)
self.csv_data_holder.update_csv_value(
"shape_width_min", min_, force_pair=True
)
self.csv_data_holder.update_csv_value(
"shape_width_max", max_, force_pair=True
)
self.csv_data_holder.update_csv_value(
"shape_width_avg", avg_, force_pair=True
)
self.csv_data_holder.update_csv_value(
"shape_width_std", std_, force_pair=True
)
self.csv_data_holder.data_list.pop("width_data", None)
if self.store_images:
# Start with the sure ones
self.store_image(
self.draw_image(
src_image=ori_img,
src_mask=mask,
objects=obj,
background="bw",
foreground="source",
contour_thickness=4,
hull_thickness=4
if self.csv_data_holder.has_csv_key("hull_area")
else 0,
width_thickness=4
if self.csv_data_holder.has_csv_key("shape_width")
else 0,
height_thickness=4
if self.csv_data_holder.has_csv_key("shape_height")
else 0,
centroid_width=10
if self.csv_data_holder.has_csv_key("centroid_x")
else 0,
),
"shapes",
)
self.store_image(
self.draw_image(
src_image=mask,
src_mask=mask,
objects=obj,
background="bw",
foreground="source",
contour_thickness=4,
hull_thickness=4
if self.csv_data_holder.has_csv_key("hull_area")
else 0,
width_thickness=4
if self.csv_data_holder.has_csv_key("shape_width")
else 0,
height_thickness=4
if self.csv_data_holder.has_csv_key("shape_height")
else 0,
centroid_width=10
if self.csv_data_holder.has_csv_key("centroid_x")
else 0,
),
"shapes_on_mask",
)
# Add new ones
if (
enclosing_circle_thickness
+ bounding_rec_thickness
+ straight_bounding_rec_thickness
> 0
):
self.store_image(
self.draw_image(
src_image=ori_img,
src_mask=mask,
objects=obj,
background="bw",
foreground="source",
enclosing_circle_thickness=enclosing_circle_thickness,
bounding_rec_thickness=bounding_rec_thickness,
straight_bounding_rec_thickness=straight_bounding_rec_thickness,
),
"more_shapes",
)
self.store_image(
self.draw_image(
src_image=mask,
src_mask=mask,
objects=obj,
background="bw",
foreground="source",
enclosing_circle_thickness=enclosing_circle_thickness,
bounding_rec_thickness=bounding_rec_thickness,
straight_bounding_rec_thickness=straight_bounding_rec_thickness,
),
"more_shapes_on_mask",
)
# handle width quantiles
keys = [k for k in self.csv_data_holder.data_list]
for k in keys:
if "quantile_width" in k:
_, kind, n = k.split("_")
n = int(n)
if kind.lower() == "width":
msk_dt = ipc.MaskData(mask)
qtl_img = np.zeros_like(mask)
qtl_img = np.dstack((qtl_img, qtl_img, qtl_img))
for i in range(n):
(
total_,
hull_,
solidity_,
min_,
max_,
avg_,
std_,
) = msk_dt.width_quantile_stats(n, i, tag=i)
self.csv_data_holder.update_csv_value(
f"quantile_width_{i + 1}_{n}_area", total_, True
)
self.csv_data_holder.update_csv_value(
f"quantile_width_{i + 1}_{n}_hull", hull_, True
)
self.csv_data_holder.update_csv_value(
f"quantile_width_{i + 1}_{n}_solidity", solidity_, True
)
self.csv_data_holder.update_csv_value(
f"quantile_width_{i + 1}_{n}_min_{kind}", min_, True
)
self.csv_data_holder.update_csv_value(
f"quantile_width_{i + 1}_{n}_max_{kind}", max_, True
)
self.csv_data_holder.update_csv_value(
f"quantile_width_{i + 1}_{n}_avg_{kind}", avg_, True
)
self.csv_data_holder.update_csv_value(
f"quantile_width_{i + 1}_{n}_std_{kind}", std_, True
)
p_qt_msk = msk_dt.height_quantile_mask(
total=n, index=i, colour=int((i + 1) / (n + 1) * 255)
)
qtl_img = cv2.bitwise_or(
qtl_img,
np.dstack(
(np.zeros_like(mask), p_qt_msk, np.zeros_like(mask))
),
)
self.store_image(qtl_img, f"quantiles_width_{n}")
self.csv_data_holder.data_list.pop(k, None)
else:
pass
return True
@time_method
def analyze_bound(
self,
img: Any,
mask: Any,
line_position: int = -1,
pseudo_color_channel: str = " v",
):
"""User-input boundary line tool
Inputs:
img = image
mask = mask made from selected contours
line_position = position of boundary line (a value of 0 would draw the line through the bottom of the image)
:param pseudo_color_channel: str
:param mask: numpy array, mask made from selected contours
:param line_position: int
:return success: bool
"""
if (line_position < 0) or not self.csv_data_holder.has_csv_key("bound_data"):
self.csv_data_holder.data_list.pop("bound_data", None)
return True
self.csv_data_holder.data_list.pop("bound_data", None)
roi_top = RectangleRegion(
left=0, width=self.width, top=0, height=line_position, name="roi_top"
)
roi_bottom = RectangleRegion(
left=0,
width=self.width,
top=line_position,
height=self.height - line_position,
name="roi_bottom",
)
mask_top = self.crop_to_roi(img=mask, roi=roi_top)
mask_bottom = self.crop_to_roi(img=mask, roi=roi_bottom)
mask_data_top = ipc.MaskData(mask_top)
mask_data_bottom = ipc.MaskData(mask_bottom)
area_ = self.csv_data_holder.retrieve_csv_value("area")
if area_ is None:
mask_data = ipc.MaskData(mask)
area_ = mask_data.area
if area_:
try:
t_height = mask_data_top.mask.shape[0] - mask_data_top.top_index
b_height = mask_data_bottom.height
self.csv_data_holder.update_csv_value(
"above_bound_height", t_height, force_pair=True
)
self.csv_data_holder.update_csv_value(
"above_bound_area", mask_data_top.area, force_pair=True
)
self.csv_data_holder.update_csv_value(
"above_bound_percent_area",
mask_data_top.area / area_ * 100,
force_pair=True,
)
self.csv_data_holder.update_csv_value(
"below_bound_height", b_height, force_pair=True
)
self.csv_data_holder.update_csv_value(
"below_bound_area", mask_data_bottom.area, force_pair=True
)
self.csv_data_holder.update_csv_value(
"below_bound_percent_area",
mask_data_bottom.area / area_ * 100,
force_pair=True,
)
self.csv_data_holder.update_csv_value(
"shape_height", t_height + b_height, force_pair=True
)
if self.store_images:
c = self.get_channel(src_img=img, channel=pseudo_color_channel)
background_img = np.dstack((c, c, c))
p_img = self.draw_image(
src_image=background_img,
channel=pseudo_color_channel,
src_mask=mask,
foreground="false_colour",
background="source",
normalize_before=True,
color_map=cv2.COLORMAP_SUMMER,
roi=roi_top,
centroid_width=10,
height_thickness=4,
width_thickness=4,
)
p_img = self.draw_image(
src_image=p_img,
channel=pseudo_color_channel,
src_mask=mask,
foreground="false_colour",
background="source",
normalize_before=False,
color_map=cv2.COLORMAP_HOT,
roi=roi_bottom,
centroid_width=10,
height_thickness=4,
width_thickness=4,
)
cv2.line(
p_img,
(0, line_position),
(self.width, line_position),
ipc.C_RED,
3,
)
self.store_image(p_img, "bounds")
except Exception as e:
logger.exception(f'Failed to analyse bounds "{repr(e)}"')
return False
else:
return True
else:
return False
@staticmethod
def apply_mask(img: Any, mask: Any, background_color: Any):
"""
Apply white image mask to image with a selected background color
Inputs:
* img = image object, color(RGB)
* mask = image object, binary (black background with white object)
* background_color = color tuple or white or black
Returns:
masked_img = masked image
:param img: numpy array
:param mask: numpy array
:param background_color: tuple or string
:return masked_img: numpy array
"""
        if isinstance(background_color, str) and background_color.lower() == "white":
            background_color = (255, 255, 255)
        elif isinstance(background_color, str) and background_color.lower() == "black":
            background_color = (0, 0, 0)
rem_img = cv2.bitwise_and(img, img, mask=mask)
background = np.full(img.shape, background_color, np.uint8)
background = cv2.bitwise_and(background, background, mask=255 - mask)
return cv2.bitwise_or(background, rem_img)
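    # Hedged usage note: apply_mask expects a BGR image and a binary 0/255 mask
    # of the same height and width; background_color may be a BGR tuple or the
    # strings "white" / "black", e.g.
    #     masked = BaseImageProcessor.apply_mask(img, mask, "white")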
def analyse_chlorophyll(self, img: Any, mask: Any):
"""
Extract chlorophyll data
"""
if self.csv_data_holder.has_csv_key(
"chlorophyll_mean"
) or self.csv_data_holder.has_csv_key("chlorophyll_std_dev"):
try:
b, g, r = cv2.split(cv2.bitwise_and(img, img, mask=mask))
c = np.exp(
(-0.0280 * r * 1.04938271604938)
+ (0.0190 * g * 1.04938271604938)
+ (-0.0030 * b * 1.04115226337449)
+ 5.780
)
if self.store_images:
calc_img = cv2.bitwise_and(c, c, mask=mask)
calc_img = (
(calc_img - calc_img.min())
/ (calc_img.max() - calc_img.min())
* 255
).astype(np.uint8)
pseudo = self.draw_image(
src_image=img,
channel=calc_img,
background="source",
foreground="false_colour",
color_map=cv2.COLORMAP_RAINBOW,
)
self.store_image(pseudo, "pseudo_chlorophyll_on_img")
self.store_image(calc_img, "chlorophyll_calculated")
tmp_tuple = cv2.meanStdDev(
c.reshape(c.shape[1] * c.shape[0]),
mask=mask.reshape(mask.shape[1] * mask.shape[0]),
)
self.csv_data_holder.update_csv_value(
"chlorophyll_mean", tmp_tuple[0][0][0]
)
self.csv_data_holder.update_csv_value(
"chlorophyll_std_dev", tmp_tuple[1][0][0]
)
except Exception as e:
return False
else:
return True
else:
return True
@time_method
def analyze_color(
self,
img: Any,
mask: Any,
pseudo_color_channel: str = "v",
pseudo_color_map: int = 2,
pseudo_bkg: str = "bw",
):
"""Analyze the color properties of an image object
Inputs:
img = image
mask = mask made from selected contours
debug = None, print, or plot. Print = save to file, Plot = print to screen.
hist_plot_type = 'None', 'all', 'rgb','lab' or 'hsv'
color_slice_type = 'None', 'rgb', 'hsv' or 'lab'
pseudo_channel = 'None', 'l', 'm' (green-magenta), 'y' (blue-yellow), h','s', or 'v',
creates pseudo colored image based on the specified channel
pseudo_bkg = 'img' => channel image,
'white' => white background image,
'both' => both img and white options
:param pseudo_color_map:
:param img: numpy array
:param mask: numpy array
:param pseudo_color_channel: str
:param pseudo_bkg: str
"""
if not (
self.csv_data_holder.has_csv_key("color_std_dev")
or self.csv_data_holder.has_csv_key("color_mean")
):
return True
masked = cv2.bitwise_and(img, img, mask=mask)
channel_data = {}
for c in self.file_handler.channels_data:
if c[0] == "chla":
continue
channel_data[c[1]] = dict(
color_space=c[0],
channel_name=c[1],
data=self.get_channel(src_img=masked, channel=c[1]),
graph_color=ipc.channel_color(c[1]),
)
self.csv_data_holder.update_csv_value("hist_bins", f"{256}")
for k, v in channel_data.items():
if v["data"] is None:
logger.warning(f"Missing channel {ipc.get_hr_channel_name(k)}")
continue
tmp_tuple = cv2.meanStdDev(
src=v["data"].reshape(v["data"].shape[1] * v["data"].shape[0]),
mask=mask.reshape(mask.shape[1] * mask.shape[0]),
)
v["hist"] = cv2.calcHist([v["data"]], [0], mask, [256], [0, (256 - 1)])
seed_ = f'{v["color_space"]}_{k}'
self.csv_data_holder.update_csv_value(
key=f"{seed_}_std_dev",
value=tmp_tuple[1][0][0],
force_pair=self.csv_data_holder.has_csv_key("color_std_dev"),
)
self.csv_data_holder.update_csv_value(
key=f"{seed_}_mean",
value=tmp_tuple[0][0][0],
force_pair=self.csv_data_holder.has_csv_key("color_mean"),
)
# Remove consumed keys
if self.csv_data_holder.has_csv_key("color_std_dev"):
self.csv_data_holder.data_list.pop("color_std_dev", None)
if self.csv_data_holder.has_csv_key("color_mean"):
self.csv_data_holder.data_list.pop("color_mean", None)
# Create Histogram Plot
if self.store_images:
fig = plt.figure(figsize=(10, 10), dpi=100)
for k, v in channel_data.items():
if v["data"] is None:
continue
plt.plot(v["hist"], label=v["channel_name"])
plt.xlim([0, 256 - 1])
plt.legend()
if self.write_images != "print":
fig.canvas.draw()
# Now we can save it to a numpy array.
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
self.store_image(data, "histogram")
elif self.write_images != "plot":
plt.axis("off")
plt.title("histogram")
fig.tight_layout()
plt.show()
plt.clf()
plt.close()
pseudo_colour = self.draw_image(
src_image=img,
channel=pseudo_color_channel,
color_map=pseudo_color_map,
foreground="false_colour",
src_mask=mask,
background=pseudo_bkg,
)
self.store_image(pseudo_colour, "pseudo_on")
# handle color quantiles
keys = [k for k in self.csv_data_holder.data_list]
for k in keys:
if "quantile_color" in k:
*_, n = k.split("_")
n = int(n)
for c, v in channel_data.items():
if v["data"] is None:
logger.warning(
f'Missing channel {v["color_space"]}, {v["channel_name"]}'
)
continue
seed_ = f'{v["color_space"]}_{c}'
hist = cv2.calcHist([v["data"]], [0], mask, [n], [0, (256 - 1)])
                    total_pixels = np.sum(hist)
#!venv/bin/python
import matplotlib
matplotlib.use("tkAgg", warn=False)
import tkinter as tk
import matplotlib.pyplot as plt
import matplotlib.backends.backend_tkagg as tkagg
import src.parameters
import src.io
import src.ions
import src.aggregates
import src.peptides
import pandas as pd
import numpy as np
import scipy
class GUI(object):
def __init__(self, dataset):
self.dataset = dataset
self.root = tk.Tk()
self.root.geometry("1500x800")
self.root.winfo_toplevel().title("Ion-network Browser")
self.root_frame = tk.Frame(self.root, bd=1, relief="raised")
self.root_frame.pack(fill="both", expand=1)
self.__initOptionsFrame()
self.__initAggregatesFrame()
self.__initIonsFrame()
self.refresh()
self.start()
def start(self):
self.root.mainloop()
def __initOptionsFrame(self):
self.options_frame = tk.Frame(
self.root_frame,
bd=1,
relief="raised"
)
self.options_frame.pack(
side="left",
fill="x"
)
self.options = {}
self.addLabelOption()
self.addAxisOption()
# self.addViewTypeOption()
self.addShowEdgesOption()
self.addMinimumSignalOption()
self.addMaximumSignalOption()
self.addFDROption()
self.addSelectAllVisibleOption()
self.addUnselectAllVisibleOption()
self.addExpandNeighborOption()
self.addRefreshOption()
for option in self.options.values():
option.pack(anchor="w", fill="x", expand=1)
def addViewTypeOption(self):
self.options["View type"] = tk.Frame(
self.options_frame,
bd=1,
relief="raised"
)
tk.Label(
self.options["View type"],
text="View type",
).pack(anchor="w")
view_types = [
"Aggregates"
] + [
name.split("/")[-1][:-4] for name in self.dataset.parameters["APEX_FILE_NAMES"]
]
self.view_type = tk.IntVar()
self.view_type.set(-1)
for view_index, view_type in enumerate(view_types):
tk.Radiobutton(
self.options["View type"],
text=view_type,
value=view_index - 1,
variable=self.view_type,
command=self.refresh
).pack(anchor="w")
def addRefreshOption(self):
self.options["Refresh"] = tk.Button(
self.options_frame,
text="Refresh",
command=self.refresh,
anchor="w"
)
def addSelectAllVisibleOption(self):
def onclick():
self.dataset.selectAllVisible()
self.dataset.updateLabelSelection(self)
self.dataset.plotSelectedNodes(self)
self.dataset.plotIons(self)
self.refreshAggregateCanvas()
self.refreshIonCanvas()
self.options["Select all visible nodes"] = tk.Button(
self.options_frame,
text="Select all visible nodes",
command=onclick,
anchor="w"
)
def addUnselectAllVisibleOption(self):
def onclick():
self.dataset.unselectAllVisible()
self.dataset.updateLabelSelection(self)
self.dataset.plotSelectedNodes(self)
self.dataset.plotIons(self)
self.refreshAggregateCanvas()
self.refreshIonCanvas()
self.options["Unselect all visible nodes"] = tk.Button(
self.options_frame,
text="Unselect all visible nodes",
command=onclick,
anchor="w"
)
def addExpandNeighborOption(self):
def onclick():
self.dataset.selectVisibleNeighbors()
self.dataset.updateLabelSelection(self)
self.dataset.plotSelectedNodes(self)
self.dataset.plotIons(self)
self.refreshAggregateCanvas()
self.refreshIonCanvas()
self.options["Expand neighbors"] = tk.Button(
self.options_frame,
text="Expand neighbors",
anchor="w",
command=onclick
)
def addShowEdgesOption(self):
def onclick():
self.dataset.plotEdges(self)
self.refreshAggregateCanvas()
self.show_edges = tk.IntVar()
self.options["Show edges"] = tk.Checkbutton(
self.options_frame,
text="Show edges",
variable=self.show_edges,
anchor="w",
command=onclick
)
def addLabelOption(self):
self.options["Label type"] = tk.Frame(
self.options_frame,
bd=1,
relief="raised"
)
tk.Label(
self.options["Label type"],
text="Label type",
).pack(anchor="w")
label_types = [
"None",
"Peptide",
"Protein",
"m/z",
"rt",
"dt",
"Index",
]
self.label_type = tk.StringVar()
self.label_type.set(label_types[0])
def onclick():
self.dataset.updateLabelSelection(self)
# self.dataset.plotSelectedNodes(self)
# self.dataset.plotIons(self)
self.refreshAggregateCanvas()
self.refreshIonCanvas()
for label_type in label_types:
tk.Radiobutton(
self.options["Label type"],
text=label_type,
variable=self.label_type,
value=label_type,
command=onclick
).pack(anchor="w")
def addAxisOption(self):
self.options["Ion axis type"] = tk.Frame(
self.options_frame,
bd=1,
relief="raised"
)
tk.Label(
self.options["Ion axis type"],
text="Ion axis type",
).pack(anchor="w")
label_types = [
"Log intensity",
"rt",
"dt",
]
self.axis_type = tk.StringVar()
self.axis_type.set(label_types[0])
def onclick():
axis_types = {
"Log intensity": "CALIBRATED_INTENSITY",
"dt": "DT",
"rt": "RT",
}
axis_type = axis_types[self.getAxisType()]
axis_min_lim = np.min(self.dataset.ions[axis_type])
axis_max_lim = np.max(self.dataset.ions[axis_type])
if axis_type == "CALIBRATED_INTENSITY":
axis_min_lim = np.log2(axis_min_lim)
axis_max_lim = np.log2(axis_max_lim)
self.ion_ax.set_ylim(
axis_min_lim,
axis_max_lim,
)
self.ion_ax.set_ylabel(axis_type)
self.dataset.labelIons(self)
self.dataset.plotIons(self)
self.refreshIonCanvas()
for label_type in label_types:
tk.Radiobutton(
self.options["Ion axis type"],
text=label_type,
variable=self.axis_type,
value=label_type,
command=onclick
).pack(anchor="w")
def addFDROption(self):
self.options["Log FDR threshold"] = tk.Frame(
self.options_frame,
bd=1,
relief="raised"
)
tk.Label(
self.options["Log FDR threshold"],
text="Log FDR threshold",
).pack(anchor="w")
def onclick(*args):
label_type = self.getLabelType()
if (label_type == "Peptide") or (label_type == "Protein"):
self.dataset.updateLabelSelection(self)
self.refreshIonCanvas()
self.dataset.plotAnnotatedNodes(self)
self.refreshAggregateCanvas()
self.fdr_threshold_slider = tk.Scale(
self.options["Log FDR threshold"],
from_=-5,
resolution=0.1,
to=0,
orient="horizontal",
command=onclick
)
self.fdr_threshold_slider.set(-2)
self.fdr_threshold_slider.pack(anchor="w")
def addMinimumSignalOption(self):
self.options["Minimum signal"] = tk.Frame(
self.options_frame,
bd=1,
relief="raised"
)
tk.Label(
self.options["Minimum signal"],
text="Minimum signal",
).pack(anchor="w")
self.minimum_replicate_count_slider = tk.Scale(
self.options["Minimum signal"],
from_=self.dataset.parameters["SIGNAL_COUNT_THRESHOLD"],
to=self.dataset.parameters["SAMPLE_COUNT"],
orient="horizontal",
command=lambda arg: self.refresh()
)
self.minimum_replicate_count_slider.set(
self.dataset.parameters["SAMPLE_COUNT"]
)
self.minimum_replicate_count_slider.pack(anchor="w")
def addMaximumSignalOption(self):
self.options["Maximum signal"] = tk.Frame(
self.options_frame,
bd=1,
relief="raised"
)
tk.Label(
self.options["Maximum signal"],
text="Maximum signal",
).pack(anchor="w")
self.maximum_replicate_count_slider = tk.Scale(
self.options["Maximum signal"],
from_=self.dataset.parameters["SIGNAL_COUNT_THRESHOLD"],
to=self.dataset.parameters["SAMPLE_COUNT"],
orient="horizontal",
command=lambda arg: self.refresh()
)
self.maximum_replicate_count_slider.set(
self.dataset.parameters["SAMPLE_COUNT"]
)
self.maximum_replicate_count_slider.pack(anchor="w")
def __initAggregatesFrame(self):
self.aggregate_frame = tk.Frame(
self.root_frame,
bd=1,
relief="raised"
)
self.aggregate_frame.pack(
side="left",
fill="both",
expand=True
)
self.aggregate_fig = plt.Figure()
self.aggregate_ax = self.aggregate_fig.add_subplot(111)
self.aggregate_canvas = tkagg.FigureCanvasTkAgg(self.aggregate_fig, self.aggregate_frame)
self.aggregate_canvas.get_tk_widget().pack(
fill='both',
expand=True
)
self.aggregate_toolbar = tkagg.NavigationToolbar2Tk(
self.aggregate_canvas,
self.aggregate_frame
)
default_release = self.aggregate_toolbar.release
def release(event):
default_release(event)
self.refresh()
self.aggregate_toolbar.release = release
self.aggregate_ax.set_xlim(
[
np.min(self.dataset.anchors["RT"]),
np.max(self.dataset.anchors["RT"]),
]
)
self.aggregate_ax.set_ylim(
[
np.min(self.dataset.anchors["DT"]),
np.max(self.dataset.anchors["DT"]),
]
)
self.aggregate_ax.set_xlabel("RT")
self.aggregate_ax.set_ylabel("DT")
self.aggregate_ax.set_title("Aggregates")
def onclick(event):
if (event is None) or (not event.dblclick):
return
self.dataset.updateSelectedNodes(self, event)
self.dataset.plotSelectedNodes(self)
self.dataset.plotIons(self)
self.refreshAggregateCanvas()
self.refreshIonCanvas()
self.aggregate_canvas.mpl_disconnect(self.click_connection)
self.click_connection = self.aggregate_canvas.mpl_connect(
'button_press_event',
onclick
)
self.click_connection = self.aggregate_canvas.mpl_connect(
'button_press_event',
onclick
)
def __initIonsFrame(self):
self.ion_frame = tk.Frame(
self.root_frame,
bd=1,
relief="raised"
)
self.ion_frame.pack(
side="left",
fill="y",
)
self.ion_fig = plt.Figure()
self.ion_ax = self.ion_fig.add_subplot(111)
self.ion_canvas = tkagg.FigureCanvasTkAgg(self.ion_fig, self.ion_frame)
self.ion_toolbar = tkagg.NavigationToolbar2Tk(
self.ion_canvas,
self.ion_frame
)
self.ion_canvas.get_tk_widget().pack(
fill='both',
expand=True
)
self.getAxisType()
self.ion_ax.set_title("Ions")
def refresh(self):
self.dataset.updateVisibleNodes(self)
self.dataset.plotVisibleNodes(self)
self.dataset.plotAnnotatedNodes(self)
self.dataset.plotSelectedNodes(self)
self.dataset.plotEdges(self)
self.refreshAggregateCanvas()
self.dataset.plotIons(self)
self.refreshIonCanvas()
def refreshAggregateCanvas(self):
self.aggregate_canvas.draw()
self.aggregate_canvas.flush_events()
def refreshIonCanvas(self):
self.ion_canvas.draw()
self.ion_canvas.flush_events()
def getMinimumReplicateCount(self):
return self.minimum_replicate_count_slider.get()
def getMaximumReplicateCount(self):
return self.maximum_replicate_count_slider.get()
def getLabelType(self):
return self.label_type.get()
def getViewType(self):
try:
return self.view_type.get()
except AttributeError:
return -1
def getFDRThreshold(self):
return 10 ** self.fdr_threshold_slider.get()
def getAxisType(self):
return self.axis_type.get()
def getShowEdges(self):
return self.show_edges.get()
def getVisibleBoundaries(self):
dt_low, dt_high = self.aggregate_ax.get_ylim()
rt_low, rt_high = self.aggregate_ax.get_xlim()
return rt_low, rt_high, dt_low, dt_high
def dummyCommand(self, *args, **kwargs):
print(self.getMinimumReplicateCount())
print(self.getMaximumReplicateCount())
print(self.getLabelType())
print(self.getViewType())
print(self.getFDRThreshold())
print(self.getAxisType())
print(self.getShowEdges())
class Dataset(object):
def __init__(self, parameters, **kwargs):
if isinstance(parameters, str):
parameters = src.parameters.importParameterDictFromJSON(parameters)
self.parameters = parameters
self.log = src.io.Log(self.parameters["LOG_FILE_NAME"][:-4] + "_browser.txt")
if "aggregates" in kwargs:
self.anchors = kwargs["aggregates"]
else:
self.anchors = src.io.loadArray("ANCHORS_FILE_NAME", self.parameters)
if "anchor_ions" in kwargs:
self.anchor_ions = kwargs["anchor_ions"]
else:
self.anchor_ions = src.io.loadMatrix(
"ANCHOR_IONS_FILE_NAME",
self.parameters,
)
if "ions" in kwargs:
self.ions = kwargs["ions"]
else:
self.ions = src.io.loadArray("IONS_FILE_NAME", self.parameters)
if "neighbors" in kwargs:
self.neighbors = kwargs["neighbors"]
else:
self.neighbors = src.io.loadMatrix(
"ANCHOR_NEIGHBORS_FILE_NAME",
self.parameters,
)
self.neighbors += self.neighbors.T
try:
if "anchor_peptide_score" in kwargs:
self.anchor_peptide_scores = kwargs["anchor_peptide_score"]
else:
self.anchor_peptide_scores = src.io.loadMatrix(
"ANCHOR_PEPTIDE_SCORES_FILE_NAME",
self.parameters,
)
if "anchor_peptide_match_counts" in kwargs:
self.anchor_peptide_match_counts = kwargs["anchor_peptide_match_counts"]
else:
self.anchor_peptide_match_counts = src.io.loadMatrix(
"ANCHOR_PEPTIDE_MATCH_COUNTS_FILE_NAME",
self.parameters,
)
self.percolated_annotations = pd.read_csv(
self.parameters["PERCOLATOR_TARGET_PIMS"],
delimiter="\t"
)
self.percolated_fdrs = self.percolated_annotations.values[:, 2]
self.percolated_anchors, self.percolated_peptides = self.anchor_peptide_match_counts.nonzero()
except FileNotFoundError:
self.anchor_peptide_scores = scipy.sparse.csr_matrix([])
self.anchor_peptide_match_counts = scipy.sparse.csr_matrix([])
self.percolated_annotations = pd.DataFrame()
            self.percolated_fdrs = np.array([], dtype=int)
            self.percolated_anchors = np.array([], dtype=int)
            self.percolated_peptides = np.array([], dtype=int)
# self.proteins, self.total_protein_sequence, self.ptms, self.ptm_matrix = src.peptides.importProteinsAndPtms(
# self.parameters,
# self.log
# )
# self.peptides, self.peptide_index_matrix, self.digestion_matrix = src.peptides.digestProteins(
# self.proteins,
# self.total_protein_sequence,
# self.ptm_matrix,
# self.parameters,
# self.log
# )
database = src.peptides.loadDatabase(parameters["DATABASE_FILE_NAME"])
# base_mass_dict = database["base_mass_dict"]
self.proteins = database["proteins"]
self.total_protein_sequence = database["total_protein_sequence"]
# ptms = database["ptms"]
# ptm_matrix = database["ptm_matrix"]
self.peptides = database["peptides"]
self.peptide_index_matrix = database["peptide_index_matrix"]
# digestion_matrix = database["digestion_matrix"]
# peptide_masses = database["peptide_masses"]
# fragments = database["fragments"]
        self.visible_nodes = np.array([], dtype=int)
        self.selected_nodes = np.array([], dtype=bool)
        self.node_labels = np.array([], dtype=bool)
def updateVisibleNodes(self, gui):
self.log.printMessage("Updating visible nodes")
previous_selection = self.visible_nodes[self.selected_nodes]
rt_low, rt_high, dt_low, dt_high = gui.getVisibleBoundaries()
selection = self.anchors["ION_COUNT"] >= gui.getMinimumReplicateCount()
selection &= self.anchors["ION_COUNT"] <= gui.getMaximumReplicateCount()
selection &= self.anchors["DT"] <= dt_high
selection &= self.anchors["DT"] >= dt_low
selection &= self.anchors["RT"] <= rt_high
selection &= self.anchors["RT"] >= rt_low
        self.visible_nodes = np.flatnonzero(selection)
# -*- coding: utf-8 -*-
import os
import argparse
import random
import numpy as np
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.autograd import Variable
from models.resnet12_2 import resnet12
from models.meta_part_inference_mini import ProtoComNet
from models.PredTrainHead import LinearClassifier, LinearRotateHead
from utils import set_gpu, Timer, count_accuracy, check_dir, log
import pickle
def one_hot(indices, depth):
"""
Returns a one-hot tensor.
This is a PyTorch equivalent of Tensorflow's tf.one_hot.
Parameters:
indices: a (n_batch, m) Tensor or (m) Tensor.
depth: a scalar. Represents the depth of the one hot dimension.
Returns: a (n_batch, m, depth) Tensor or (m, depth) Tensor.
"""
    encoded_indices = torch.zeros(indices.size() + torch.Size([depth])).cuda()
    index = indices.view(indices.size() + torch.Size([1]))
    encoded_indices = encoded_indices.scatter_(1, index, 1)
    return encoded_indices
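# Illustrative usage sketch (added for clarity; not part of the original training
# script). Elsewhere in this file one_hot is called with flattened 1-D label
# tensors, e.g. one_hot(labels.reshape(-1), train_way); the helper below shows the
# expected shapes on a toy input. A CUDA device is assumed, as in the function itself.
def _one_hot_usage_example():
    labels = torch.tensor([0, 2, 1]).cuda()   # three samples with classes in [0, 4)
    encoded = one_hot(labels, depth=4)        # float tensor of shape (3, 4)
    # encoded is [[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0]] row-wise
    return encoded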
def get_model(options):
# Choose the embedding network
if options.network == 'ResNet':
network = resnet12().cuda()
network = torch.nn.DataParallel(network)
fea_dim = 512
else:
print ("Cannot recognize the network type")
assert(False)
propa_head = ProtoComNet(opt=options, in_dim=fea_dim).cuda()
# Choose the classification head
    if options.use_trainval == 'True':
n_classes=80
else:
n_classes=64
if options.pre_head == 'LinearNet':
pre_head = LinearClassifier(in_dim=fea_dim, n_classes=n_classes).cuda()
elif options.pre_head == 'LinearRotateNet':
pre_head = LinearRotateHead(in_dim=fea_dim, n_classes=n_classes).cuda()
else:
print("Cannot recognize the dataset type")
assert (False)
if options.phase == 'pretrain':
from models.classification_heads_orgin import ClassificationHead
else:
from models.classification_heads import ClassificationHead
# Choose the classification head
if options.head == 'CosineNet':
cls_head = ClassificationHead(base_learner='CosineNet').cuda()
elif options.head == 'FuseCosNet':
cls_head = ClassificationHead(base_learner='FuseCos').cuda()
else:
print ("Cannot recognize the dataset type")
assert(False)
return (network, propa_head, pre_head, cls_head)
def get_dataset(options):
# Choose the embedding network
if options.dataset == 'miniImageNet':
from data.mini_imagenet import MiniImageNet, FewShotDataloader, MiniImageNetPC
# dataset_trainval = MiniImageNet(phase='trainval')
if options.phase == 'savepart':
dataset_train = MiniImageNet(phase='train', do_not_use_random_transf=True)
elif options.phase == 'metainfer':
dataset_train = MiniImageNetPC(phase='train', shot=options.train_shot)
else:
dataset_train = MiniImageNet(phase='train')
dataset_val = MiniImageNet(phase='val')
dataset_test = MiniImageNet(phase='test')
data_loader = FewShotDataloader
else:
print ("Cannot recognize the dataset type")
assert(False)
return (dataset_train, dataset_val, dataset_test, data_loader)
def seed_torch(seed=21):
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def pre_train(opt, dataset_train, dataset_val, dataset_test, data_loader):
data_loader_pre = torch.utils.data.DataLoader
# Dataloader of Gidaris & Komodakis (CVPR 2018)
if opt.use_trainval == 'True':
train_way = 80
dloader_train = data_loader_pre(
dataset=dataset_trainval,
batch_size=128,
shuffle=True,
num_workers=4
)
else:
train_way = 64
dloader_train = data_loader_pre(
dataset=dataset_train,
batch_size=128,
shuffle=True,
num_workers=4
)
dloader_val = data_loader(
dataset=dataset_val,
nKnovel=opt.test_way,
nKbase=0,
nExemplars=opt.val_shot, # num training examples per novel category
nTestNovel=opt.val_query * opt.test_way, # num test examples for all the novel categories
nTestBase=0, # num test examples for all the base categories
batch_size=1,
num_workers=0,
epoch_size=1 * opt.val_episode, # num of batches per epoch
)
set_gpu(opt.gpu)
check_dir('./experiments/')
check_dir(opt.save_path)
log_file_path = os.path.join(opt.save_path, "train_log.txt")
log(log_file_path, str(vars(opt)))
(embedding_net, propa_head, pre_head, cls_head) = get_model(opt)
print(list(dict(propa_head.named_parameters()).keys()))
optimizer = torch.optim.SGD([{'params': embedding_net.parameters()},
{'params': pre_head.parameters()}], lr=0.1, momentum=0.9, \
weight_decay=5e-4, nesterov=True)
lambda_epoch = lambda e: 1.0 if e < 60 else (0.1 if e < 80 else 0.01 if e < 90 else (0.001))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_epoch, last_epoch=-1)
max_val_acc = 0.0
max_test_acc = 0.0
timer = Timer()
x_entropy = torch.nn.CrossEntropyLoss()
for epoch in range(1, opt.num_epoch + 1):
# Train on the training split
lr_scheduler.step()
# Fetch the current epoch's learning rate
epoch_learning_rate = 0.1
for param_group in optimizer.param_groups:
epoch_learning_rate = param_group['lr']
log(log_file_path, 'Train Epoch: {}\tLearning Rate: {:.4f}'.format(
epoch, epoch_learning_rate))
_, _, _, _ = [x.train() for x in (embedding_net, propa_head, pre_head, cls_head)]
train_accuracies = []
train_losses = []
for i, batch in enumerate(tqdm(dloader_train), 1):
data, labels = [x.cuda() for x in batch]
if opt.pre_head == 'LinearNet' or opt.pre_head == 'CosineNet':
emb = embedding_net(data)
logit = pre_head(emb)
smoothed_one_hot = one_hot(labels.reshape(-1), train_way)
smoothed_one_hot = smoothed_one_hot * (1 - opt.eps) + (1 - smoothed_one_hot) * opt.eps / (train_way - 1)
log_prb = F.log_softmax(logit.reshape(-1, train_way), dim=1)
loss = -(smoothed_one_hot * log_prb).sum(dim=1)
loss = loss.mean()
acc = count_accuracy(logit.reshape(-1, train_way), labels.reshape(-1))
elif opt.pre_head == 'LinearRotateNet' or opt.pre_head == 'DistRotateNet':
x_ = []
y_ = []
a_ = []
for j in range(data.shape[0]):
x90 = data[j].transpose(2, 1).flip(1)
x180 = x90.transpose(2, 1).flip(1)
x270 = x180.transpose(2, 1).flip(1)
x_ += [data[j], x90, x180, x270]
y_ += [labels[j] for _ in range(4)]
a_ += [torch.tensor(0), torch.tensor(1), torch.tensor(2), torch.tensor(3)]
x_ = Variable(torch.stack(x_, 0)).cuda()
y_ = Variable(torch.stack(y_, 0)).cuda()
a_ = Variable(torch.stack(a_, 0)).cuda()
emb = embedding_net(x_)
# print(emb.shape)
logit = pre_head(emb, use_cls=True)
logit_rotate = pre_head(emb, use_cls=False)
smoothed_one_hot = one_hot(y_.reshape(-1), train_way)
smoothed_one_hot = smoothed_one_hot * (1 - opt.eps) + (1 - smoothed_one_hot) * opt.eps / (train_way - 1)
log_prb = F.log_softmax(logit.reshape(-1, train_way), dim=1)
loss = -(smoothed_one_hot * log_prb).sum(dim=1)
loss = loss.mean()
rloss = F.cross_entropy(input=logit_rotate, target=a_)
loss = 0.5 * loss + 0.5 * rloss
acc = count_accuracy(logit.reshape(-1, train_way), y_.reshape(-1))
else:
print("Cannot recognize the pre_head type")
assert (False)
train_accuracies.append(acc.item())
train_losses.append(loss.item())
if (i % 100 == 0):
train_acc_avg = np.mean(np.array(train_accuracies))
log(log_file_path, 'Train Epoch: {}\tBatch: [{}]\tLoss: {}\tAccuracy: {} % ({} %)'.format(
epoch, i, loss.item(), train_acc_avg, acc))
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Evaluate on the validation split
_, _, _, _ = [x.eval() for x in (embedding_net, propa_head, pre_head, cls_head)]
val_accuracies = []
val_losses = []
for i, batch in enumerate(tqdm(dloader_val(opt.seed)), 1):
data_support, labels_support, \
data_query, labels_query, _, _ = [
x.cuda() for x in batch]
test_n_support = opt.test_way * opt.val_shot
test_n_query = opt.test_way * opt.val_query
emb_support = embedding_net(data_support.reshape([-1] + list(data_support.shape[-3:])))
emb_support = emb_support.reshape(1, test_n_support, -1)
emb_query = embedding_net(data_query.reshape([-1] + list(data_query.shape[-3:])))
emb_query = emb_query.reshape(1, test_n_query, -1)
logit_query = cls_head(emb_query, emb_support, labels_support, opt.test_way, opt.val_shot)
loss = x_entropy(logit_query.reshape(-1, opt.test_way), labels_query.reshape(-1))
acc = count_accuracy(logit_query.reshape(-1, opt.test_way), labels_query.reshape(-1))
val_accuracies.append(acc.item())
val_losses.append(loss.item())
val_acc_avg = np.mean(np.array(val_accuracies))
val_acc_ci95 = 1.96 * np.std(np.array(val_accuracies)) / np.sqrt(opt.val_episode)
val_loss_avg = np.mean(np.array(val_losses))
if val_acc_avg > max_val_acc:
max_val_acc = val_acc_avg
torch.save({'embedding': embedding_net.state_dict(), 'propa_head': propa_head.state_dict(),
'pre_head': pre_head.state_dict(), 'cls_head': cls_head.state_dict()}, \
os.path.join(opt.save_path, 'best_pretrain_model.pth'))
log(log_file_path, 'Validation Epoch: {}\t\t\tLoss: {:.4f}\tAccuracy: {:.2f} ± {:.2f} % (Best)' \
.format(epoch, val_loss_avg, val_acc_avg, val_acc_ci95))
else:
log(log_file_path, 'Validation Epoch: {}\t\t\tLoss: {:.4f}\tAccuracy: {:.2f} ± {:.2f} %' \
.format(epoch, val_loss_avg, val_acc_avg, val_acc_ci95))
torch.save({'embedding': embedding_net.state_dict(), 'propa_head': propa_head.state_dict(),
'pre_head': pre_head.state_dict(), 'cls_head': cls_head.state_dict()} \
, os.path.join(opt.save_path, 'last_pretrain_epoch.pth'))
if epoch % opt.save_epoch == 0:
torch.save({'embedding': embedding_net.state_dict(), 'propa_head': propa_head.state_dict(),
'pre_head': pre_head.state_dict(), 'cls_head': cls_head.state_dict()} \
, os.path.join(opt.save_path, 'epoch_{}_pretrain.pth'.format(epoch)))
def part_prototype(opt, dataset_train, dataset_val, dataset_test, data_loader):
data_loader_pre = torch.utils.data.DataLoader
# Dataloader of Gidaris & Komodakis (CVPR 2018)
dloader_train = data_loader_pre(
dataset=dataset_train,
batch_size=1,
shuffle=False,
num_workers=0
)
set_gpu(opt.gpu)
check_dir('./experiments/')
check_dir(opt.save_path)
log_file_path = os.path.join(opt.save_path, "train_log.txt")
log(log_file_path, str(vars(opt)))
(embedding_net, propa_head, pre_head, cls_head) = get_model(opt)
# Load saved model checkpoints
saved_models = torch.load(os.path.join(opt.save_path, 'best_pretrain_model.pth'))
embedding_net.load_state_dict(saved_models['embedding'])
embedding_net.eval()
embs = []
for i, batch in enumerate(tqdm(dloader_train), 1):
data, labels = [x.cuda() for x in batch]
with torch.no_grad():
emb = embedding_net(data)
embs.append(emb)
embs = torch.cat(embs, dim=0)
with open('./data/mini_imagenet_part_prior_train.pickle', 'rb') as handle:
part_prior = pickle.load(handle)
train_class_name_file = './data/mini_imagenet_catname2label_train.pickle'
with open(train_class_name_file, 'rb') as handle:
catname2label_train = pickle.load(handle)
a = 1
attr_feature = {}
for attr_id in part_prior['attribute_id_class_dict'].keys():
if attr_id not in [part_prior['wnids2id'][wnid] for wnid in part_prior['all_wnids']]:
attr_im_id = []
for sel_class_id in list(set(part_prior['attribute_id_class_dict'][attr_id])):
if sel_class_id in [part_prior['wnids2id'][wnid] for wnid in part_prior['wnids_train']]:
sel_class = catname2label_train[part_prior['id2wnids'][sel_class_id]]
attr_im_id.extend(dataset_train.label2ind[sel_class])
attr_im = embs[attr_im_id, :]
mean = torch.mean(attr_im, dim=0).unsqueeze(dim=0)
std = torch.std(attr_im, dim=0).unsqueeze(dim=0)
attr_feature[attr_id] = {'mean': mean, 'std':std}
with open(os.path.join(opt.save_path, "mini_imagenet_metapart_feature.pickle"), 'wb') as handle:
pickle.dump(attr_feature, handle, protocol=pickle.HIGHEST_PROTOCOL)
class_feature = {}
for class_id in part_prior['class_attribute_id_dict'].keys():
if class_id in [part_prior['wnids2id'][wnid] for wnid in part_prior['wnids_train']]:
sel_class = catname2label_train[part_prior['id2wnids'][class_id]]
class_im = embs[dataset_train.label2ind[sel_class], :]
mean = torch.mean(class_im, dim=0).unsqueeze(dim=0)
std = torch.std(class_im, dim=0).unsqueeze(dim=0)
class_feature[sel_class] = {'mean': mean, 'std':std}
with open(os.path.join(opt.save_path, "mini_imagenet_class_feature.pickle"), 'wb') as handle:
pickle.dump(class_feature, handle, protocol=pickle.HIGHEST_PROTOCOL)
def meta_inference(opt, dataset_train, dataset_val, dataset_test, data_loader):
data_loader_pre = torch.utils.data.DataLoader
# Dataloader of Gidaris & Komodakis (CVPR 2018)
dloader_train = data_loader_pre(
dataset=dataset_train,
batch_size=128,
shuffle=True,
num_workers=0
)
dloader_val = data_loader(
dataset=dataset_val,
nKnovel=opt.test_way,
nKbase=0,
nExemplars=opt.val_shot, # num training examples per novel category
nTestNovel=opt.val_query * opt.test_way, # num test examples for all the novel categories
nTestBase=0, # num test examples for all the base categories
batch_size=1,
num_workers=0,
epoch_size=1 * opt.val_episode, # num of batches per epoch
)
set_gpu(opt.gpu)
check_dir('./experiments/')
check_dir(opt.save_path)
log_file_path = os.path.join(opt.save_path, "train_log.txt")
log(log_file_path, str(vars(opt)))
(embedding_net, propa_head, pre_head, cls_head) = get_model(opt)
# Load saved model checkpoints
saved_models = torch.load(os.path.join(opt.save_path, 'best_pretrain_model.pth'))
embedding_net.load_state_dict(saved_models['embedding'])
embedding_net.eval()
cls_head.eval()
optimizer = torch.optim.SGD([{'params': propa_head.parameters()}], lr=0.1, momentum=0.9, \
weight_decay=5e-4, nesterov=True)
lambda_epoch = lambda e: 1.0 if e < 15 else (0.1 if e < 40 else 0.01 if e < 80 else (0.001))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_epoch, last_epoch=-1)
train_losses = []
x_entropy = torch.nn.CrossEntropyLoss()
max_loss = 10e16
max_val_acc = 0
max_test_acc = 0
for epoch in range(0, opt.num_epoch + 1):
# Train on the training split
lr_scheduler.step()
# Fetch the current epoch's learning rate
epoch_learning_rate = 0.1
for param_group in optimizer.param_groups:
epoch_learning_rate = param_group['lr']
log(log_file_path, 'Train Epoch: {}\tLearning Rate: {:.4f}'.format(
epoch, epoch_learning_rate))
propa_head.train()
train_accuracies = []
for i, batch in enumerate(tqdm(dloader_train), 1):
data, labels = [x.cuda() for x in batch]
nb, ns, nc, nw, nh = data.shape
with torch.no_grad():
data = data.reshape(nb*ns, nc, nw, nh)
emb = embedding_net(data)
emb = emb.reshape(nb, ns, -1)
emb = emb.mean(dim=1)
proto, proto_true = propa_head(emb, labels)
loss = F.mse_loss(proto, proto_true)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_losses.append(loss.item())
if (i % 10 == 0):
train_loss_avg = np.mean(np.array(train_losses))
log(log_file_path, 'Train Epoch: {}\tBatch: [{}]\tLoss: {}({})'.format(
epoch, i, loss.item(), train_loss_avg))
# Evaluate on the validation split
_, _, _, _ = [x.eval() for x in (embedding_net, propa_head, pre_head, cls_head)]
val_accuracies = []
val_losses = []
for i, batch in enumerate(tqdm(dloader_val(opt.seed)), 1):
data_support, labels_support, \
data_query, labels_query, k_all, _ = [
x.cuda() for x in batch]
test_n_support = opt.test_way * opt.val_shot
test_n_query = opt.test_way * opt.val_query
with torch.no_grad():
emb_support = embedding_net(data_support.reshape([-1] + list(data_support.shape[-3:])))
emb_support = emb_support.reshape(1, test_n_support, -1)
emb_query = embedding_net(data_query.reshape([-1] + list(data_query.shape[-3:])))
emb_query = emb_query.reshape(1, test_n_query, -1)
logit_query = cls_head(k_all, propa_head, emb_query, emb_support, labels_support, opt.test_way, opt.val_shot, is_scale=True)
loss = x_entropy(logit_query.reshape(-1, opt.test_way), labels_query.reshape(-1))
acc = count_accuracy(logit_query.reshape(-1, opt.test_way), labels_query.reshape(-1))
val_accuracies.append(acc.item())
val_losses.append(loss.item())
val_acc_avg = np.mean(np.array(val_accuracies))
val_acc_ci95 = 1.96 * np.std(np.array(val_accuracies)) / np.sqrt(opt.val_episode)
val_loss_avg = np.mean(np.array(val_losses))
if val_acc_avg > max_val_acc:
max_val_acc = val_acc_avg
torch.save({'embedding': embedding_net.state_dict(), 'propa_head': propa_head.state_dict(),
'pre_head': pre_head.state_dict(), 'cls_head': cls_head.state_dict()}, \
os.path.join(opt.save_path, 'best_pretrain_model_meta_infer_val_{}w_{}s_{}.pth'.format(opt.test_way, opt.val_shot, opt.head)))
log(log_file_path, 'Validation Epoch: {}\t\t\tLoss: {:.4f}\tAccuracy: {:.2f} ± {:.2f} % (Best)' \
.format(epoch, val_loss_avg, val_acc_avg, val_acc_ci95))
else:
log(log_file_path, 'Validation Epoch: {}\t\t\tLoss: {:.4f}\tAccuracy: {:.2f} ± {:.2f} %' \
.format(epoch, val_loss_avg, val_acc_avg, val_acc_ci95))
torch.save({'embedding': embedding_net.state_dict(), 'propa_head': propa_head.state_dict(),
'pre_head': pre_head.state_dict(), 'cls_head': cls_head.state_dict()} \
, os.path.join(opt.save_path, 'last_pretrain_epoch_meta_infer.pth'))
if epoch % opt.save_epoch == 0:
torch.save({'embedding': embedding_net.state_dict(), 'propa_head': propa_head.state_dict(),
'pre_head': pre_head.state_dict(), 'cls_head': cls_head.state_dict()} \
, os.path.join(opt.save_path, 'epoch_{}_pretrain_meta_infer.pth'.format(epoch)))
def meta_train(opt, dataset_train, dataset_val, dataset_test, data_loader):
# Dataloader of Gidaris & Komodakis (CVPR 2018)
dloader_train = data_loader(
dataset=dataset_train,
nKnovel=opt.train_way,
nKbase=0,
nExemplars=opt.train_shot, # num training examples per novel category
nTestNovel=opt.train_way * opt.train_query, # num test examples for all the novel categories
nTestBase=0, # num test examples for all the base categories
batch_size=opt.episodes_per_batch,
num_workers=4,
epoch_size=opt.episodes_per_batch * 100, # num of batches per epoch
)
dloader_val = data_loader(
dataset=dataset_val,
nKnovel=opt.test_way,
nKbase=0,
nExemplars=opt.val_shot, # num training examples per novel category
nTestNovel=opt.val_query * opt.test_way, # num test examples for all the novel categories
nTestBase=0, # num test examples for all the base categories
batch_size=1,
num_workers=0,
epoch_size=1 * opt.val_episode, # num of batches per epoch
)
set_gpu(opt.gpu)
check_dir('./experiments/')
check_dir(opt.save_path)
log_file_path = os.path.join(opt.save_path, "train_log.txt")
log(log_file_path, str(vars(opt)))
(embedding_net, propa_head, pre_head, cls_head) = get_model(opt)
# Load saved model checkpoints
saved_models = torch.load(os.path.join(opt.save_path, 'best_pretrain_model_meta_infer_val_{}w_{}s_{}.pth'.format(opt.test_way, opt.val_shot, opt.head)))
embedding_net.load_state_dict(saved_models['embedding'])
embedding_net.eval()
propa_head.load_state_dict(saved_models['propa_head'])
propa_head.eval()
optimizer = torch.optim.SGD([{'params': embedding_net.parameters()},
{'params': propa_head.parameters()},
{'params': cls_head.parameters()}], lr=0.0001, momentum=0.9, \
weight_decay=5e-4, nesterov=True)
lambda_epoch = lambda e: 1.0 if e < 15 else (0.1 if e < 25 else 0.01 if e < 30 else (0.001))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_epoch, last_epoch=-1)
max_val_acc = 0.0
max_test_acc = 0.0
timer = Timer()
x_entropy = torch.nn.CrossEntropyLoss()
for epoch in range(0, opt.num_epoch + 1):
if epoch != 0:
# Train on the training split
lr_scheduler.step()
# Fetch the current epoch's learning rate
epoch_learning_rate = 0.1
for param_group in optimizer.param_groups:
epoch_learning_rate = param_group['lr']
log(log_file_path, 'Train Epoch: {}\tLearning Rate: {:.4f}'.format(
epoch, epoch_learning_rate))
_, _, _ = [x.train() for x in (embedding_net, propa_head, cls_head)]
train_accuracies = []
train_losses = []
for i, batch in enumerate(tqdm(dloader_train(epoch)), 1):
data_support, labels_support, \
data_query, labels_query, k_all, _ = [
x.cuda() for x in batch]
train_n_support = opt.train_way * opt.train_shot
train_n_query = opt.train_way * opt.train_query
emb_support = embedding_net(data_support.reshape([-1] + list(data_support.shape[-3:])))
emb_support = emb_support.reshape(opt.episodes_per_batch, train_n_support, -1)
emb_query = embedding_net(data_query.reshape([-1] + list(data_query.shape[-3:])))
emb_query = emb_query.reshape(opt.episodes_per_batch, train_n_query, -1)
logit_query = cls_head(k_all, propa_head, emb_query, emb_support, labels_support, opt.train_way, opt.train_shot, is_scale=False)
smoothed_one_hot = one_hot(labels_query.reshape(-1), opt.train_way)
smoothed_one_hot = smoothed_one_hot * (1 - opt.eps) + (1 - smoothed_one_hot) * opt.eps / (opt.train_way - 1)
log_prb = F.log_softmax(logit_query.reshape(-1, opt.train_way), dim=1)
loss = -(smoothed_one_hot * log_prb).sum(dim=1)
loss = loss.mean()
acc = count_accuracy(logit_query.reshape(-1, opt.train_way), labels_query.reshape(-1))
train_accuracies.append(acc.item())
train_losses.append(loss.item())
if (i % 10 == 0):
                    train_acc_avg = np.mean(np.array(train_accuracies))
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
from six.moves import urllib
import pandas as pd
HOUSING_PATH = "datasets/housing"
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
# In[2]:
housing = load_housing_data()
# housing =
housing.head()
# In[3]:
housing.info()
# In[4]:
housing.describe()
# In[5]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(10,7))
plt.show()
# In[6]:
import numpy as np
def split_train_set(data, test_ratio):
shuffled_indices = np.random.permutation(len(data))
test_set_size = int(len(data) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
train_set, test_set = split_train_set(housing, 0.2)
print("Train: ", len(train_set), "+ Test: ", len(test_set))
# In[7]:
import hashlib
def test_set_check(identifier, test_ratio, hash):
return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio
def split_train_test_by_id(data, test_ratio, id_column, hash=hashlib.md5):
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio, hash))
return data.loc[~in_test_set], data.loc[in_test_set]
housing_with_id = housing.reset_index() # adds an `index` column
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
# In[8]:
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
# In[9]:
housing.hist(column='median_income', bins=10)
plt.show()
# In[10]:
housing['income_cat'] = np.ceil(housing['median_income']/1.5)
housing.hist('income_cat', bins=10)
plt.show()
housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True)
housing.hist('income_cat')
plt.show()
# In[11]:
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing['income_cat']):
strat_train_set = housing.iloc[train_index]
strat_test_set = housing.iloc[test_index]
for set_ in (strat_train_set, strat_test_set):
    set_.drop(columns='income_cat', inplace=True)
# In[12]:
housing = strat_train_set.copy()
housing.describe()
# In[13]:
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1)
# In[14]:
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.2,
s=housing["population"]/100, label="population",
c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True)
# In[15]:
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
# In[16]:
from pandas.plotting import scatter_matrix
attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
# In[17]:
attributes = ["median_house_value", "households", "total_bedrooms", "population"]
scatter_matrix(housing[attributes], figsize=(12, 8))
# In[18]:
housing.plot(kind="scatter", x="median_income", y="median_house_value", alpha=0.1)
# In[19]:
housing["rooms_per_household"] = housing["total_rooms"] / housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"] / housing["total_rooms"]
housing["population_per_household"] = housing["population"] / housing["households"]
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
# In[20]:
housing = strat_train_set.drop(columns="median_house_value")
housing_labels = strat_train_set["median_house_value"].copy()
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="median")
housing_num = housing.drop(columns="ocean_proximity")
# imputer.fit(housing_num)
# print(imputer.statistics_)
# X = imputer.transform(housing_num)
X = imputer.fit_transform(housing_num)
housing_tr = pd.DataFrame(X, columns=housing_num.columns)
# housing_tr.describe()
# In[21]:
# housing["ocean_proximity"].value_counts()
# from sklearn.preprocessing import LabelEncoder
# encoder = LabelEncoder()
# housing_cat_encoded = encoder.fit_transform(housing["ocean_proximity"])
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
# print(housing["ocean_proximity"].to_numpy().reshape(-1,1).shape)
housing_cat_1hot = encoder.fit_transform(housing["ocean_proximity"].to_numpy().reshape(-1,1))
print(type(housing_cat_1hot))
print(encoder.categories_)
# dfgdh = DataFrameSelector(["ocean_proximity"]).fit_transform(housing)
# print(np.unique(dfgdh))
# from sklearn.preprocessing import LabelBinarizer
# encoder = LabelBinarizer()
# housing_cat_1hot = encoder.fit_transform(housing["ocean_proximity"])
# print(housing_cat_1hot)
# housing_cat_1hot = pd.get_dummies(housing, columns=['ocean_proximity'])
# housing_cat_1hot.head()
# In[22]:
from sklearn.base import BaseEstimator, TransformerMixin
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self # nothing else to do
def transform(self, X, y=None):
rooms_per_household = X[:, rooms_ix] / X[:, household_ix]
population_per_household = X[:, population_ix] / X[:, household_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
return np.c_[X, rooms_per_household, population_per_household,
bedrooms_per_room]
else:
return np.c_[X, rooms_per_household, population_per_household]
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
housing_extra_attribs = pd.DataFrame(housing_extra_attribs)
housing_extra_attribs.head()
# print(type(housing.columns))
# In[23]:
from sklearn.base import BaseEstimator, TransformerMixin
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
# selector = DataFrameSelector(["ocean_proximity"])
# housing_cat_select = selector.fit_transform(housing)
# print(housing_cat_select[:5])
# encoder = LabelBinarizer()
# housing_cat_1hot = encoder.fit_transform(housing_cat_select)
# print(housing_cat_1hot[:5])
# In[24]:
from sklearn.preprocessing import OneHotEncoder
class CatOneHotEncoder (BaseEstimator, TransformerMixin):
# from sklearn.preprocessing import OneHotEncoder
def __init__(self, sparse = False):
self.sparse = sparse
encoder = OneHotEncoder(sparse=self.sparse)
def fit(self, X, y=None):
return self
def transform(self, X):
encoder = OneHotEncoder(sparse=self.sparse)
return encoder.fit_transform(X.reshape(-1,1))
def get_categories(self, X):
return list(np.unique(X))
# def categories_(self, X):
# # encoder = OneHotEncoder(sparse=self.sparse)
# # encoder.fit_transform(X.reshape(-1,1))
# return ["encoder.categories_"]
encoder = CatOneHotEncoder()
encoder.fit(DataFrameSelector(['ocean_proximity']).fit_transform(housing))
# print(encoder.fit_transform(DataFrameSelector(['ocean_proximity']).fit_transform(housing)))
print(encoder.get_categories(housing['ocean_proximity']))
# selector = DataFrameSelector(["ocean_proximity"])
# housing_cat_select = selector.fit_transform(housing)
# print(housing_cat_select[:5])
# encoder = CatEncoder()
# housing_cat_1hot = encoder.fit_transform(housing_cat_select)
# print(type(housing_cat_1hot))
# In[84]:
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import FeatureUnion
num_attribs = list(housing_num)
cat_attribs = ["ocean_proximity"]
num_pipeline = Pipeline([
('selector', DataFrameSelector(list(housing_num))),
('imputer', SimpleImputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
])
# housing_prepared = num_pipeline.fit_transform(housing)
# print(housing_prepared[:5])
cat_pipeline = Pipeline([
('selector', DataFrameSelector(cat_attribs)),
('1hot_encoder', CatOneHotEncoder())
])
# housing_prepared = cat_pipeline.fit_transform(housing)
# print(housing_prepared[:5])
full_pipeline = FeatureUnion(transformer_list=[
('num_pipeline', num_pipeline),
('cat_pipeline', cat_pipeline)
])
# pipeline = Pipeline([
# ('selector', DataFrameSelector(list(housing_cat_1hot))),
# ('imputer', SimpleImputer(strategy="median")),
# ('attribs_adder', CombinedAttributesAdder()),
# ('std_scaler', StandardScaler()),
# ])
housing_prepared = full_pipeline.fit_transform(housing)
# print(type(housing_prepared))
# print(housing_prepared[:5])
pd.DataFrame(housing_prepared).head()
# In[27]:
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
# some_data = housing.iloc[:10]
# some_labels = housing_labels[:10]
# some_data_prepared = full_pipeline.fit_transform(some_data)
# print(pd.DataFrame(some_data_prepared[:5]))
# print("Predictions:\"", lin_reg.predict(some_data_prepared))
# print(some_data_prepared.shape)
# print(housing_prepared.shape)
from sklearn.metrics import mean_squared_error
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
print(lin_rmse)
# In[28]:
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels)
housing_predictions = tree_reg.predict(housing_prepared)
tree_rmse = np.sqrt(mean_squared_error(housing_labels, housing_predictions))
print(tree_rmse)
# In[29]:
from sklearn.model_selection import cross_val_score
scores = cross_val_score(tree_reg, housing_prepared, housing_labels,
scoring="neg_mean_squared_error", cv=10)
tree_rmse_scores = np.sqrt(-scores)
# print(rmse_scores)
def display_scores(scores):
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
display_scores(tree_rmse_scores)
# In[30]:
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels,
scoring="neg_mean_squared_error", cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)
# In[31]:
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor()
forest_reg.fit(housing_prepared, housing_labels)
forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels,
scoring="neg_mean_squared_error", cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
display_scores(forest_rmse_scores)
# In[33]:
print(np.sqrt(mean_squared_error(forest_reg.predict(housing_prepared), housing_labels)))
# In[41]:
from sklearn.model_selection import GridSearchCV
# param_grid = [
# {'n_estimators':[3, 10, 30], 'max_features':[2, 4, 6, 8]},
# {'bootstrap':[False], 'n_estimators':[3, 10], 'max_features':[2, 4, 6]}
# ]
param_grid = [
{'n_estimators':[30, 100], 'max_features':[6, 8, 10]}
]
forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid,
cv=5, scoring='neg_mean_squared_error')
grid_search.fit(housing_prepared, housing_labels)
# In[34]:
# For quick use when restarting the kernel
from sklearn.model_selection import GridSearchCV
# param_grid = [
# {'n_estimators':[3, 10, 30], 'max_features':[2, 4, 6, 8]},
# {'bootstrap':[False], 'n_estimators':[3, 10], 'max_features':[2, 4, 6]}
# ]
param_grid = [
{'n_estimators':[100], 'max_features':[6]}
]
forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid,
cv=5, scoring='neg_mean_squared_error')
grid_search.fit(housing_prepared, housing_labels)
# In[35]:
print(np.sqrt(-grid_search.best_score_))
print(grid_search.best_params_)
# In[44]:
cvres = grid_search.cv_results_
for mean_score, params in sorted(list(zip(cvres['mean_test_score'], cvres['params'])), reverse=True):
print(np.sqrt(-mean_score), params)
# In[36]:
feature_importances = grid_search.best_estimator_.feature_importances_
# print(feature_importances)
extra_attribs = ["rooms_per_household", "pop_per_household", "bedrooms_per_room"]
cat_one_hot_attribs = encoder.get_categories(housing['ocean_proximity'])
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
attribs_importance_list = sorted(list(zip(feature_importances, attributes)), reverse=True)
for element in attribs_importance_list:
print(element)
_, attribs_importance_order = zip(*attribs_importance_list)
attribs_importance_order = list(attribs_importance_order)
print(attribs_importance_order)
# In[37]:
final_model = grid_search.best_estimator_
X_test = strat_test_set.drop(columns="median_house_value")
y_test = strat_test_set["median_house_value"].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_rmse = np.sqrt(mean_squared_error(y_test, final_predictions))
print(final_rmse)
# In[73]:
from sklearn.svm import SVR
SupportVectorMachine = SVR()
svr_param_grid = [
{'kernel':["linear"], 'C':[0.3, 1, 3]},
{'kernel':["rbf"], 'C':[1, 3], 'gamma':[0.03, 0.1]}
]
svr_grid_search = GridSearchCV(SupportVectorMachine, svr_param_grid,
cv=5, scoring="neg_mean_squared_error")
svr_grid_search.fit(housing_prepared, housing_labels)
# In[84]:
svr_cv_results = svr_grid_search.cv_results_
# for sorted(score, param in svr_cv_results['mean_test_score'], svr_cv_results['params'], reverse=True):
# print(score, param)
for element in sorted(list(zip(np.sqrt(-svr_cv_results['mean_test_score']), svr_cv_results['params']))):
print(element)
# print(svr_cv_results['params'])
# In[85]:
svr_param_grid = [
{'kernel':["linear"], 'C':[3, 10, 30, 100]},
{'kernel':["rbf"], 'C':[10, 3], 'gamma':[3, 1]}
]
svr_grid_search = GridSearchCV(SupportVectorMachine, svr_param_grid,
cv=5, scoring="neg_mean_squared_error")
svr_grid_search.fit(housing_prepared, housing_labels)
# In[87]:
svr_cv_results = svr_grid_search.cv_results_
# for sorted(score, param in svr_cv_results['mean_test_score'], svr_cv_results['params'], reverse=True):
# print(score, param)
for element in sorted(list(zip(np.sqrt(-svr_cv_results['mean_test_score']), svr_cv_results['params']))):
print(element)
# print(svr_cv_results['params'])
# In[38]:
housing_reduced = pd.DataFrame(housing_prepared)
housing_reduced.columns = attributes
housing_reduced = housing_reduced[attribs_importance_order[:8]]
housing_reduced.head()
# housing_prepared_small = housing_prepared[attribs_importance_order[:8]]
# attribs_importance_order[:8]
# pd.DataFrame(housing_prepared).head()
# print(type(housing_prepared))
# In[39]:
class ReduceFeatures(BaseEstimator, TransformerMixin):
def __init__(self, attribs, attribs_order, num=8):
self.attribs = attribs
self.attribs_order = attribs_order
self.num = num
def fit(self, X, y=None):
return self
def transform(self, X, y=None) :
X_dataframe = pd.DataFrame(X)
X_dataframe.columns = self.attribs
X_reduced = X_dataframe[self.attribs_order[:(self.num)]]
return X_reduced.values
reducer = ReduceFeatures(attributes, attribs_importance_order)
housing_reduced = reducer.fit_transform(housing_prepared)
print(housing_reduced[:5])
# print(housing_reduced.head())
# housing_reduced = housing_reduced[attribs_importance_order[:8]]
# print(housing_reduced[:5])
# In[40]:
reducer_pipeline = Pipeline([
('full_pipeline', full_pipeline),
('reducer', ReduceFeatures(attributes, attribs_importance_order))
])
housing_reduced = reducer_pipeline.fit_transform(housing)
print(housing_reduced[:5])
# In[70]:
# param_grid = [
# {'n_estimators':[30, 100]}
# ]
param_grid = [
{'n_estimators':[30, 100]}
]
grid_search_red = GridSearchCV(forest_reg, param_grid,
cv=5, scoring='neg_mean_squared_error')
grid_search_red.fit(housing_reduced, housing_labels)
cvres_red = grid_search_red.cv_results_
for mean_score, params in sorted(list(zip(cvres_red['mean_test_score'], cvres_red['params'])), reverse=True):
print(np.sqrt(-mean_score), params)
# In[87]:
final_model_red = grid_search_red.best_estimator_
X_test = strat_test_set.drop(columns="median_house_value")
y_test = strat_test_set["median_house_value"].copy()
X_test_prepared = reducer_pipeline.transform(X_test)
final_predictions = final_model_red.predict(X_test_prepared)
final_rmse = np.sqrt(mean_squared_error(y_test, final_predictions))
print(final_rmse)
# In[88]:
final_model_red = RandomForestRegressor(n_estimators=100)
final_model_red.fit(reducer_pipeline.fit_transform(housing), housing_labels)
X_test = strat_test_set.drop(columns="median_house_value")
y_test = strat_test_set["median_house_value"].copy()
X_test_prepared = reducer_pipeline.transform(X_test)
final_predictions = final_model_red.predict(X_test_prepared)
final_rmse = np.sqrt(mean_squared_error(y_test, final_predictions))
print(final_rmse)
# In[ ]:
# In[41]:
list(attributes)
# In[95]:
class HouseValueQuirksElimination(BaseEstimator, TransformerMixin):
def __init__(self, prices):
self.prices = prices.to_numpy()
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
for i in [500000, 450000, 350000]:
if i == 500000:
ix, = np.where(self.prices==i)
print(len(ix))
else:
ix_i, = np.where(self.prices==i)
print(len(ix_i))
ix = np.append(ix, ix_i)
return np.delete(X, ix, 0)
quirks_remover = HouseValueQuirksElimination(housing_labels)
# housing_quirks = quirks_remover.fit_transform(housing)
# In[117]:
some_data = DataFrameSelector(list(housing)).fit_transform(housing)
selector = housing_labels.to_numpy()
# print(type(selector))
# print(some_data[:5])
# i, = np.where(selector==500000)
# print(len(i))
# print(type(i))
# print(i.shape)
# ix = np.empty(1)
# print(type(ix))
# print(ix.shape)
# print(ix)
# del ix
for i in [500001, 500000, 450000, 350000]:
    if i == 500001:  # first value in the list initialises ix; later values are appended
ix, = np.where(selector==i)
print(len(ix))
else:
ix_i, = np.where(selector==i)
print(len(ix_i))
ix = np.append(ix, ix_i)
print(len(ix))
# if ix exist:
# print("Is")
# else:
# print("Not")
some_data_prepared = np.delete(some_data, ix, 0)
print(some_data.shape)
print(some_data_prepared.shape)
# plot()
# In[131]:
print(strat_train_set['median_house_value'].value_counts().head(10))
quirks = [500001, 450000, 350000]
strat_train_set_quirks = strat_train_set[~strat_train_set['median_house_value'].isin(quirks)]
strat_test_set_quirks = strat_test_set[~strat_test_set['median_house_value'].isin(quirks)]
# strat_train_set_quirks.describe()
# strat_test_set_quirks.describe()
# print(~strat_train_set['median_house_value'].isin([500000, 450000, 350000]))
# print(type(strat_train_set.index[strat_train_set['median_house_value'==500000]]))
# df.index[df['BoolCol'] == True].tolist()
# idx = strat_train_set[1].index
# print(idx)
# print(strat_test_set[strat_test_set['median_house_value'].isin([500000])])
# strat_train_set.head()
# In[136]:
housing_quirks = strat_train_set_quirks.drop(columns='median_house_value')
housing_labels_quirks = strat_train_set_quirks['median_house_value'].copy()
housing_quirks_prepared = full_pipeline.fit_transform(housing_quirks)
forest_reg_quirks = RandomForestRegressor()
param_grid = [
{'n_estimators':[100], 'max_features':[6, 8, 10]}
]
grid_search_quirks = GridSearchCV(forest_reg_quirks, param_grid,
cv=5, scoring='neg_mean_squared_error')
grid_search_quirks.fit(housing_quirks_prepared, housing_labels_quirks)
cvres_quirks = grid_search_quirks.cv_results_
for mean_score, params in sorted(list(zip(cvres_quirks['mean_test_score'], cvres_quirks['params'])), reverse=True):
    print(np.sqrt(-mean_score), params)
#!/usr/bin/python
import scipy
import scipy.signal
import numpy
"""
Control theory helper functions library.
Wraps scipy routines to provide control functions including:
- Pole placement
- Steady-state LQR gain
- Steady-state Kalman gain
- Controllability and observability matrices
- Continuous to discrete transformations for the system and noise matrices
Inspired by 971's control helpers library
"""
__author__ = '<NAME> (<EMAIL>)'
def _validate_system(A, B, C, D):
if A is not None:
A = numpy.asmatrix(A)
if B is not None:
B = numpy.asmatrix(B)
if C is not None:
C = numpy.asmatrix(C)
if D is not None:
D = numpy.asmatrix(D)
assert A is not None and A.shape[0] == A.shape[1], "A must be square"
if B is not None:
assert B.shape[0] == A.shape[0], "A.shape %s and B.shape %s must be compatible" % (A.shape, B.shape)
if C is not None:
assert C.shape[1] == A.shape[0], "A.shape %s and C.shape %s must be compatible" % (A.shape, C.shape)
if B is not None and C is not None and D is not None:
assert D.shape[0] == C.shape[0], "C.shape %s and D.shape %s must be compatible" % (C.shape, D.shape)
assert D.shape[1] == B.shape[1], "B.shape %s and D.shape %s must be compatible" % (B.shape, D.shape)
def place(A, B, poles):
"""
Find the m*n matrix K such that the poles (eigenvalues) of A-BK are at the
desired locations. Works on both discrete-time and continuous-time systems.
    Note: For continuous-time matrices, poles must have negative real parts to
    achieve stability; for discrete-time matrices, they must lie inside the unit
    circle (magnitude less than 1).
Args:
A: n*n system dynamics matrix
B: n*m control signal matrix
poles: complex array of desired pole locations
For every complex pole a+bi, its conjugate a-bi must also be a pole
Returns:
K: m*n gains matrix such that u = -Kx
"""
A = numpy.asmatrix(A)
B = numpy.asmatrix(B)
_validate_system(A, B, None, None)
assert len(poles) == A.shape[0], "The number of poles (%i) must be equal to the rank of the system (%i)." % (len(poles), A.shape[0])
assert numpy.linalg.matrix_rank(controllability(A, B)) == A.shape[0], "System must be completely controllable to do pole placement."
result = scipy.signal.place_poles(A, B, poles)
for req, res in zip(result.requested_poles, result.computed_poles):
if abs(req - res) > 1e-3:
print("Warning: Pole %s could not be assigned as given and was instead assigned as %s" % (req, res))
return result.gain_matrix
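# Minimal usage sketch (illustrative, not from the original library): place the
# closed-loop poles of a continuous-time double integrator at -2 and -3.
def _place_example():
    A = numpy.array([[0., 1.], [0., 0.]])
    B = numpy.array([[0.], [1.]])
    K = place(A, B, [-2., -3.])
    # numpy.linalg.eigvals(A - B @ K) should be approximately [-2, -3]
    return K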
def controllability(A, B):
"""
Calculate the controllability matrix of the system defined by A and B.
Works on both discrete-time and continuous-time systems.
In a fully controllable system, rank(controllability(A, B)) == n
Args:
A: n*n system dynamics matrix
B: n*m control signal matrix
Returns:
E: n*nm controllability matrix
"""
A = numpy.asmatrix(A)
B = numpy.asmatrix(B)
_validate_system(A, B, None, None)
n = A.shape[0]
m = B.shape[1]
E = numpy.asmatrix(numpy.zeros((n, n*m)))
x = B
for i in range(0, n):
j = i * m
E[:n, j:j+m] = x
x = A * x
return E
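# Illustrative check (not part of the original library): the double integrator is
# fully controllable, so its controllability matrix has full rank n.
def _controllability_example():
    A = numpy.array([[0., 1.], [0., 0.]])
    B = numpy.array([[0.], [1.]])
    E = controllability(A, B)
    return numpy.linalg.matrix_rank(E) == A.shape[0]   # True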
def observability(A, C):
"""
Calculate the observability matrix of the system defined by A and C.
Works on both discrete-time and continuous-time systems.
    In a fully observable system, rank(observability(A, C)) == n
Observability is the dual of controllability, meaning that
observability(A, C) = controllability(A.T, C.T).T
Args:
A: n*n system dynamics matrix
C: n*q measurement signal matrix
Returns:
O: nq*n observability matrix
"""
A = numpy.asmatrix(A)
C = numpy.asmatrix(C)
_validate_system(A, None, C, None)
n = A.shape[0]
q = C.shape[0]
O = numpy.asmatrix(numpy.zeros((n*q, n)))
y = C
for i in range(0, n):
j = i * q
O[j:j+q, :n] = y
y = y * A
return O
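# Illustrative check (not part of the original library): measuring only position
# with C = [1, 0] still makes the double integrator fully observable.
def _observability_example():
    A = numpy.array([[0., 1.], [0., 0.]])
    C = numpy.array([[1., 0.]])
    O = observability(A, C)
    return numpy.linalg.matrix_rank(O) == A.shape[0]   # True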
def c2d(A, B, dt, Q = None, R = None):
"""
Transform the continuous-time system dx/dt = Ax + Bu into the discrete-time
formulation x(n+1) = Ax(n) + Bu(n).
Args:
A: n*n continuous-time system dynamics matrix
B: n*m continuous-time control signal matrix
dt: time step of the discretized process
Q (optional): n*n continuous-time process noise covariance matrix
R (optional): q*q continuous-time measurement noise covariance matrix
Returns
Tuple (A_d, B_d, Q_d, R_d)
A_d: n*n discrete-time system dynamics matrix
B_d: n*m discrete-time control signal matrix
Q_d: n*n discrete-time process noise covariance matrix (None if no Q given)
R_d: q*q discrete-time measurement noise covariance matrix (None if no R given)
"""
_validate_system(A, B, None, None)
A = numpy.asmatrix(A)
B = numpy.asmatrix(B)
n = A.shape[0]
m = B.shape[1]
F = numpy.asmatrix(numpy.zeros((n + m, n + m)))
F[:n, :n] = A
F[:n, n:n+m] = B
G = scipy.linalg.expm(F * dt)
A_d = G[:n, :n]
B_d = G[:n, n:n+m]
Q_d = R_d = None
if Q is not None and R is not None:
Q = numpy.asmatrix(Q)
R = numpy.asmatrix(R)
assert Q.shape == A.shape, "The dimensions of Q %s must match those of A %s" % (Q.shape, A.shape)
assert R.shape[0] == R.shape[1], "R must be square but is instead %ix%i" % (R.shape[0], R.shape[1])
H = numpy.asmatrix(numpy.zeros((n+n, n+n)))
H[:n, :n] = -A
H[n:n+n, n:n+n] = A
H[:n, n:n+n] = Q
I = numpy.asmatrix(scipy.linalg.expm(H * dt))
Q_d = numpy.asmatrix(I[n:n+n, n:n+n].T * I[:n, n:n+n])
        R_d = numpy.asmatrix(R / dt)
    return (A_d, B_d, Q_d, R_d)
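# Minimal usage sketch (illustrative, not from the original library): discretize
# the double integrator with a 10 ms time step. Values shown are approximate.
def _c2d_example():
    A = numpy.array([[0., 1.], [0., 0.]])
    B = numpy.array([[0.], [1.]])
    A_d, B_d, Q_d, R_d = c2d(A, B, dt=0.01)
    # A_d ~ [[1, 0.01], [0, 1]], B_d ~ [[5e-5], [0.01]]; Q_d and R_d are None here
    return A_d, B_d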
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
l = 0.1
n = 10
T0 = 0
T1s = 40
T2s = 40
dx = l/10
alpha = 0.0001
t_final = 60
dt = 0.1
x = np.linspace(dx/2, l-dx/2, n)
T = np.ones(n)*T0
dTdt = np.empty(n)
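# The script is truncated at this point; below is a minimal sketch (an assumption,
# not the original code) of the explicit finite-difference (FTCS) update loop the
# setup above appears to be building towards. T1s/T2s are taken to be the fixed
# surface temperatures at the two ends, as suggested by their definitions.
for _ in np.arange(0, t_final, dt):
    dTdt[1:-1] = alpha * (T[2:] - 2 * T[1:-1] + T[:-2]) / dx**2
    dTdt[0] = alpha * (T[1] - 2 * T[0] + T1s) / dx**2
    dTdt[-1] = alpha * (T2s - 2 * T[-1] + T[-2]) / dx**2
    T = T + dTdt * dt
plt.plot(x, T)
plt.xlabel("Distance (m)")
plt.ylabel("Temperature (C)")
plt.show()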
from collections import OrderedDict
from scipy.spatial.transform import Rotation as R
from scipy.spatial.transform import Slerp
import numpy as np
import json
import multiprocessing as mp
from tqdm import tqdm
def pretty_print(ob):
print(json.dumps(ob, indent=4))
def euler_to_rot(angles):
# Euler ZYX to Rot
# Note that towr has (x, y, z) order
x = angles[0]
y = angles[1]
z = angles[2]
ret = np.array([
np.cos(y) * np.cos(z),
np.cos(z) * np.sin(x) * np.sin(y) - np.cos(x) * np.sin(z),
np.sin(x) * np.sin(z) + np.cos(x) * np.cos(z) * np.sin(y),
np.cos(y) * np.sin(z),
        np.cos(x) * np.cos(z) + np.sin(x) * np.sin(y) * np.sin(z),
import unittest
import numpy as np
from pyml.linear_model.classification import sigmoid
from pyml.linear_model.classification import LogisticClassifier
class test_classification(unittest.TestCase):
def test_sigmoid(self):
result = sigmoid(np.array([0,2]))
true_result = np.array([0.5, 0.88079708])
np.testing.assert_almost_equal(result, true_result)
def test_propagate(self):
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
test_dw = np.array([[0.99993216],[1.99980262]])
test_db = 0.49993523062470574
test_cost = 6.000064773192205
lc = LogisticClassifier()
grads, cost = lc.propagate(w, b, X, Y)
np.testing.assert_array_almost_equal(grads['dw'], test_dw)
np.testing.assert_array_almost_equal(grads['db'], test_db)
np.testing.assert_array_almost_equal(cost, test_cost)
def test_optimier(self):
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
std_w = np.array([[0.1124579 ],[0.23106775]])
std_b = np.array(1.5593049248448891)
std_dw = np.array([[0.90158428],[1.76250842]])
std_db = np.array(0.4304620716786828)
std_cost = [6.000064773192205]
lc = LogisticClassifier(learning_rate = 0.009)
params, grads, costs = lc.optimize(w, b, X, Y, num_iterations= 100)
np.testing.assert_array_almost_equal(params['w'], std_w)
np.testing.assert_array_almost_equal(params['b'], std_b)
np.testing.assert_array_almost_equal(grads['dw'], std_dw)
np.testing.assert_array_almost_equal(grads['db'], std_db)
np.testing.assert_array_almost_equal(costs, std_cost)
def test_pred(self):
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
lc = LogisticClassifier()
lc.parameters['w'] = w
lc.parameters['b'] = b
y_pred = lc.predict(X.T)
std_y_pred = np.array([1, 1])
np.testing.assert_array_almost_equal(y_pred, std_y_pred)
import numpy as np
from scipy.interpolate import LinearNDInterpolator, interp1d
from astropy import table
from astropy.table import Table, Column
import warnings
def get_track_meta(track, key="FeH"):
""" get meta info from a track """
assert key in track.meta.keys()
return track.meta[key]
def find_rank_1d(arr, val, sort=False):
""" return ind of the two elements in *arr* that bracket *val* """
if sort:
arr = np.sort(arr)
sub = np.where((arr[:-1] < val) & (arr[1:] >= val))[0]
assert len(sub) > 0
return np.hstack((sub, sub + 1))
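# Illustrative usage (added example): for arr = np.array([0.8, 1.0, 1.2, 1.5]) and
# val = 1.1, only index 1 satisfies arr[1] < 1.1 <= arr[2], so the call returns
# array([1, 2]), i.e. the indices of the two bracketing elements.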
def get_track_item_given_eeps(track, eeps):
""" return track items given a set of eeps """
ind = np.zeros((len(track, )), dtype=bool)
for eep in eeps:
ind |= track["_eep"] == eep
return track[ind]
def calc_weight(arr, val, norm=1.):
""" calculate normalized weight """
weight = np.abs(arr - val)
weight *= norm / np.sum(weight)
return np.array(weight)
def table_linear_combination(t, weight):
""" given weight, return the linear combination of each row for each column
"""
assert len(t) == len(weight)
new_cols = []
colnames = t.colnames
ncols = len(colnames)
for i in range(ncols):
if t.dtype[i] in (np.int, np.float):
colname = colnames[i]
new_cols.append(
Column(np.array([np.sum(t[colname].data * weight)]), colname))
return Table(new_cols)
class StarObject():
def __init__(self, t):
assert len(t) == 1
colnames = t.colnames
ncols = len(colnames)
for i in range(ncols):
self.__setattr__(colnames[i], t[colnames[i]].data[0])
class TrackSet:
""" a set of tracks """
data = []
eep_bounds = (1, 808)
default_coord = ["_lgmass", "_feh", "_lgage", "_eep"]
bci = None
def __init__(self, tracks,
metadict=dict(minit="initial_mass",
feh="FEH",
eep="EEPS",
mbol="Mbol")):
""" initialization of track set object """
self.metadict = metadict
self.data = np.array(tracks)
self.grid_minit = np.array(
[get_track_meta(track, metadict["minit"]) for track in tracks])
self.grid_feh = np.array(
[get_track_meta(track, metadict["feh"]) for track in tracks])
#self.grid_EEP = [get_track_meta(track, metadict["eep"]) for track in tracks]
# every track starts from EEP=1
self.grid_EEP0 = np.array([np.min(_["_eep"]) for _ in self.data])
self.grid_EEP1 = np.array([np.max(_["_eep"]) for _ in self.data])
self.u_minit = np.unique(self.grid_minit)
self.u_feh = np.unique(self.grid_feh)
self.min_minit = np.min(self.u_minit)
self.max_minit = np.max(self.u_minit)
self.min_feh = np.min(self.u_feh)
self.max_feh = np.max(self.u_feh)
self.min_eep = np.min(self.grid_EEP0)
self.max_eep = np.max(self.grid_EEP1)
def get_track4(self, mass_feh=(1.01, 0.01)):
""" return the 4 neighboring stellar tracks """
test_minit, test_feh = np.array(mass_feh, dtype=np.float)
# assert Minit [Fe/H] in range
try:
assert self.min_minit < test_minit <= self.max_minit
assert self.min_feh < test_feh <= self.max_feh
except AssertionError as ae:
return None
# 1. locate 4 tracks
ind_minit = find_rank_1d(self.u_minit, test_minit)
ind_feh = find_rank_1d(self.u_feh, test_feh)
val_minit = self.u_minit[ind_minit]
val_feh = self.u_feh[ind_feh]
ind_track = np.where(np.logical_and(
(self.grid_minit == val_minit[0]) | (
self.grid_minit == val_minit[1]),
(self.grid_feh == val_feh[0]) | (self.grid_feh == val_feh[1])))[0]
track4 = self.data[ind_track]
return track4
def get_track4_unstructured(self, mass_feh=(1.01, 0.01)):
""" return the 4 neighboring stellar tracks given unstructured grid """
test_minit, test_feh = np.array(mass_feh, dtype=np.float)
# A weighted Energy Distance approach
from scipy.spatial import distance
import numpy as np
def WED(X, Y):
"""
Calculates the weighted Energy Distance between two sets of planetary systems (or any other user defined set).
Parameters
----------
X : list of 'n' planets (in d-dimensional phase space) in following format:
[(x_1,x_2,....,x_d, w_x)_1, (x_1,x_2,....,x_d, w_x)_2,....,(x_1,x_2,....,x_d, w_x)_n]
Y : list of 'm' planets (in d-dimensional phase space) in following format:
[(y_1,y_2,....,y_d, w_y)_1, (y_1,y_2,....,y_d, w_y)_2,....,(y_1,y_2,....,y_d, w_y)_m]
Returns
-------
Weighted Energy Distance
Examples
--------
from PASSta import WED
WED([(1,2,3),(1.1,2.1,3.1)], [(1,2,3),(1.2,2.2,3.2)]) #---> 0.274
WED([(1,2,3)], [(1,2,3),(1.2,2.2,3.2)]) #---> 0.388
"""
n, m = len(X), len(Y)
# Check if X or Y are empty
if n == 0 or m == 0:
raise ValueError("WED assumes both X and Y are not empty")
# Get the phase space dimension and check that all dimensions of X_i and Y_j are the same
xdim = len(X[0])
ydim = len(Y[0])
if xdim != ydim:
raise ValueError("Inconsistent planet phase space dimensions")
for x in X:
if xdim != len(x):
raise ValueError("All X elements must be of same size")
for y in Y:
if ydim != len(y):
raise ValueError("All Y elements must be of same size")
# Get X,Y weight vectors and their sums
W_x = np.array([xi[xdim-1] for xi in X])
W_y = np.array([yi[ydim-1] for yi in Y])
W_X, W_Y = sum(W_x), sum(W_y)
Xd = [x[:xdim-1] for x in X]
Yd = [y[:ydim-1] for y in Y]
A_DistMat = distance.cdist(Xd, Yd, 'euclidean')
A = sum(sum((np.outer(W_x, W_y) * A_DistMat))) / (W_X * W_Y)
B_DistMat = distance.cdist(Xd, Xd, 'euclidean')
B = sum(sum((np.outer(W_x, W_x) * B_DistMat))) / (W_X * W_X)
C_DistMat = distance.cdist(Yd, Yd, 'euclidean')
C = sum(sum((np.outer(W_y, W_y) * C_DistMat))) / (W_Y * W_Y)
# weighted energy distance; this reproduces the docstring examples above
return np.sqrt(2 * A - B - C)
import logging
import os
import time
from typing import List, Tuple
import numpy as np
import torch
from torch import Tensor
from ujson import load as load_json
from ..base import DataLoader, LocalDataset
class MNISTDataLoader(DataLoader):
def __init__(self, data_dir='./../../../data/MNIST', batch_size=10):
super().__init__(data_dir, batch_size, batch_size)
@staticmethod
def read_data(train_data_dir, test_data_dir):
"""
Parses data in given train and test data directories
assumes:
- the data in the input directories are .json files with
keys 'users' and 'user_data'
- the set of train set users is the same as the set of test set users
Return:
clients: list of non-unique client ids
groups: list of group ids; empty list if none found
train_data: dictionary of train data
test_data: dictionary of test data
"""
start_time = time.perf_counter()
groups = list()
train_data = dict()
test_data = dict()
for f in (file for file in os.listdir(train_data_dir) if file.endswith('.json')):
file_path = os.path.join(train_data_dir, f)
with open(file_path, 'r') as fd:
cdata = load_json(fd)
if 'hierarchies' in cdata:
groups.extend(cdata['hierarchies'])
train_data.update(cdata['user_data'])
for f in (file for file in os.listdir(test_data_dir) if file.endswith('.json')):
file_path = os.path.join(test_data_dir, f)
with open(file_path, 'r') as fd:
cdata = load_json(fd)
test_data.update(cdata['user_data'])
clients = sorted(cdata['users'])
logging.info(f'Time spent reading MNIST JSONs: {time.perf_counter() - start_time}')
return clients, groups, train_data, test_data
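# Illustrative sketch of the per-file JSON layout read_data() expects, inferred only
# from the keys accessed above (an assumption, not taken from any dataset documentation):
# {
#     "users": ["f_00000", "f_00001", ...],
#     "hierarchies": [...],                    # optional group ids
#     "user_data": {"f_00000": {"x": [...], "y": [...]}, ...}
# }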
def batch_data(self, data, batch_size) -> List[Tuple[Tensor, Tensor]]:
"""
data is a dict := {'x': [numpy array], 'y': [numpy array]} (on one client)
returns x, y, which are both numpy array of length: batch_size
"""
data_x = data['x']
data_y = data['y']
# randomly shuffle data
np.random.seed(100)
rng_state = np.random.get_state()
np.random.shuffle(data_x)
np.random.set_state(rng_state)
np.random.shuffle(data_y)
# loop through mini-batches
batch_data = list()
for i in range(0, len(data_x), batch_size):
batched_x = data_x[i:i + batch_size]
batched_y = data_y[i:i + batch_size]
batched_x = torch.from_numpy(np.asarray(batched_x))
batched_y = torch.from_numpy(np.asarray(batched_y))
batch_data.append((batched_x, batched_y))
return batch_data
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 13:12:06 2020
@author: peter
"""
import numpy as np
from pathlib import Path
import shutil
import json
import tifffile
import quantities as pq
import scipy.interpolate as interp
import scipy.ndimage as ndimage
import scipy.signal as signal
import pandas as pd
import datetime
import pdb
import re
import f.general_functions as gf
import f.ephys_functions as ef
def get_events_exclude_surround_events(
tc,
std,
surround_tc,
surround_std,
z_score=3,
surround_z=7,
exclude_first=0,
max_overlap=0.75,
excluded_circle=None,
excluded_dead=None,
):
ev = detect_events(tc, std, z_score=z_score, exclude_first=exclude_first)
surrounds_ev = detect_events(
tc, std, z_score=surround_z, exclude_first=exclude_first
)
excluded_dict = {}
dict_drop = []
for key in ev.keys():
if type(key) == str:
continue
if key not in surrounds_ev.keys():
continue
sur_e = surrounds_ev[key].T
e = ev[key].T
# if a detected surround event overlaps for more than max_overlap, then remove
# detects any overlaps
overlapping = np.logical_and(
e[:, 0, None] < sur_e[None, :, 1], e[:, 1, None] >= sur_e[None, :, 0]
)
if not np.any(overlapping):
continue
drop = []
wh = np.where(overlapping)
# now detect size of overlap and delete if proportionally greater than max overlap
for idx in range(len(wh[0])):
overlap = min(e[wh[0][idx], 1], sur_e[wh[1][idx], 1]) - max(
e[wh[0][idx], 0], sur_e[wh[1][idx], 0]
)
if overlap > max_overlap * (e[wh[0][idx], 1] - e[wh[0][idx], 0]):
drop.append(wh[0][idx])
# pdb.set_trace()
exc_e = np.array([x for ii, x in enumerate(e) if ii in drop])
keep_e = np.array([x for ii, x in enumerate(e) if ii not in drop])
excluded_dict[key] = exc_e.T
if len(keep_e) > 0:
ev[key] = keep_e.T
else:
dict_drop.append(key)
# delete empty fields
for key in dict_drop:
del ev[key]
# exclude ROIs on edge of illumination
if excluded_circle is not None:
circle_dict = {}
for idx in excluded_circle:
if idx in ev.keys():
circle_dict[idx] = ev[idx]
del ev[idx]
ev["excluded_circle_events"] = circle_dict
# exclude ROIs on edge of illumination
if excluded_dead is not None:
dead_dict = {}
if len(excluded_dead) > 0:
for idx in excluded_dead:
if idx in ev.keys():
dead_dict[idx] = ev[idx]
del ev[idx]
else:
pass
ev["excluded_dead_events"] = dead_dict
# include the surround data
ev["surround_events"] = surrounds_ev
ev["excluded_events"] = excluded_dict
return ev
def get_events_exclude_simultaneous_events(
tc,
std,
z_score=3,
exclude_first=0,
max_events=5,
overlap=0.75,
excluded_circle=None,
excluded_dead=None,
):
ev, excluded_dict = detect_events_remove_simultaneous(
tc,
std,
z_score=z_score,
exclude_first=exclude_first,
max_overlap=overlap,
max_events=max_events,
)
# exclude ROIs on edge of illumination
if excluded_circle is not None:
circle_dict = {}
for idx in excluded_circle:
if idx in ev.keys():
circle_dict[idx] = ev[idx]
del ev[idx]
ev["excluded_circle_events"] = circle_dict
# exclude ROIs on edge of illumination
if excluded_dead is not None:
dead_dict = {}
if len(excluded_dead) > 0:
for idx in excluded_dead:
if idx in ev.keys():
dead_dict[idx] = ev[idx]
del ev[idx]
else:
pass
ev["excluded_dead_events"] = dead_dict
ev["excluded_events"] = excluded_dict
ev["surround_events"] = excluded_dict
print("Check this - surrounds and exclude the same")
return ev
def detect_events_remove_simultaneous(
tc, std, z_score=3, exclude_first=0, max_events=5, max_overlap=0.5
):
tc_filt = ndimage.gaussian_filter(tc, (0, 3))
std_filt = ndimage.gaussian_filter(std, (0, 3))
tc_filt[:, :exclude_first] = 1
events = np.abs(tc_filt - 1) > z_score * std_filt
# Use closing to join split events and remove small events
struc = np.zeros((3, 5))
struc[1, :] = 1
events = ndimage.binary_opening(events, structure=struc, iterations=2)
events = ndimage.binary_closing(events, structure=struc, iterations=2)
# now count simultaneous events and remove those where they are
num_events = np.sum(events, 0)
excluded_events = num_events > max_events
excluded_time = np.where(excluded_events)[0]
wh = np.where(events)
idxs, locs = np.unique(wh[0], return_index=True)
locs = np.append(locs, len(wh[0]))
excluded_result = {}
result = {}
for i, idx in enumerate(idxs):
llocs = wh[1][locs[i] : locs[i + 1]]
split_locs = np.array(recursive_split_locs(llocs))
# check if they have both positive and negative going - messes with integration later
t = tc_filt[idx, :]
corr_locs = correct_event_signs(t, split_locs)
overlap = np.sum(np.isin(llocs, excluded_time).astype(int)) / len(llocs)
if overlap > max_overlap:
excluded_result[idx] = corr_locs.T
else:
result[idx] = corr_locs.T
result["tc_filt"] = tc_filt
result["tc"] = tc
return result, excluded_result
def get_surround_masks(masks, surround_rad=20, dilate=True):
def get_bounding_circle_radius(masks):
rows, cols = np.any(masks, axis=-1), np.any(masks, axis=-2)
rs = np.apply_along_axis(first_last, -1, rows)
cs = np.apply_along_axis(first_last, -1, cols)
centers = np.array(
[rs[:, 0] + (rs[:, 1] - rs[:, 0]) / 2, cs[:, 0] + (cs[:, 1] - cs[:, 0]) / 2]
).T
# bounding radius is the hypotenuse /2
radii = np.sqrt((cs[:, 1] - cs[:, 0]) ** 2 + (rs[:, 1] - rs[:, 0]) ** 2) / 2
return radii, centers
def first_last(arr_1d):
return np.where(arr_1d)[0][[0, -1]]
# avoid border effects/bleedthrough by dilating existing rois
structure = np.ones((3, 3, 3))
structure[0::2, ...] = 0
dilated_masks = ndimage.binary_dilation(masks, structure=structure, iterations=4)
roi_rads, centers = get_bounding_circle_radius(dilated_masks)
x, y = np.indices(masks.shape[-2:])
rs = np.sqrt(
(x[None, ...] - centers[:, 0, None, None]) ** 2
+ (y[None, ...] - centers[:, 1, None, None]) ** 2
)
surround_roi = np.logical_xor(
dilated_masks, rs < roi_rads[:, None, None] + surround_rad
)
return surround_roi
def get_surround_masks_cellfree(masks, surround_rad=50, dilate=True):
all_masks = np.any(masks, axis=0)
# avoid border effects/bleedthrough by dilating existing rois
structure = np.ones((3, 3, 3))
structure[0::2, ...] = 0
dilated_masks = ndimage.binary_dilation(masks, structure=structure, iterations=4)
centers = np.array([ndimage.center_of_mass(m) for m in dilated_masks])
x, y = np.indices(masks.shape[-2:])
rs = np.sqrt(
(x[None, ...] - centers[:, 0, None, None]) ** 2
+ (y[None, ...] - centers[:, 1, None, None]) ** 2
)
surround_roi = np.logical_and(~all_masks, rs < surround_rad)
# see if the area is too small
areas = np.sum(surround_roi, axis=(-2, -1))
# check nowhere too small
small = areas < 2000
if np.any(small):
for new_rs in range(surround_rad, 2 * surround_rad, 10):
small = areas < 2000
surround_roi[small] = np.logical_and(~all_masks, rs[small, ...] < new_rs)
if not np.any(small):
break
small = areas < 2000
# revert back to normal behaviour - just take an area around and dont care about cells
if np.any(small):
surround_roi[small] = np.logical_and(masks[small], rs[small, ...] < new_rs)
return surround_roi
def get_observation_length(event_dict):
tc = event_dict["tc_filt"]
exclude_dict = event_dict["surround_events"]
length = tc.shape[1]
lengths = []
# count as non-observed any time during a surround event
for i in range(tc.shape[0]):
if i in exclude_dict.keys():
lengths.append(
length - np.sum(exclude_dict[i].T[:, 1] - exclude_dict[i].T[:, 0])
)
else:
lengths.append(length)
return np.array(lengths)
def apply_exclusion(exclude_dict, tc):
excluded_tc = np.copy(tc)
for roi in exclude_dict.keys():
for i in range(exclude_dict[roi].shape[-1]):
ids = exclude_dict[roi][:, i]
excluded_tc[roi, ids[0] : ids[1]] = 1
return excluded_tc
def soft_threshold(arr, thresh, to=1):
# Soft-thresholds arr towards the value 'to': entries within thresh of 'to' snap to 'to', the rest are shrunk towards 'to' by thresh
res = np.copy(arr)
wh = np.where(np.abs(arr - to) < thresh)
n_wh = np.where(np.abs(arr - to) >= thresh)
sgn = np.sign(arr - to)
res[wh] = to
res[n_wh] -= sgn[n_wh] * thresh
return res
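# Illustrative usage (added example): soft_threshold(np.array([1.05, 1.3, 0.6]), 0.1)
# returns array([1. , 1.2, 0.7]) -- 1.05 lies within 0.1 of 1 and snaps to 1, while the
# other entries are pulled towards 1 by 0.1.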
def split_event(t, ids):
# splits a zero-(actually 1) crossing event into multiple non-zero crossing events recursively
# removes one point
if not np.logical_and(
np.any(t[ids[0] : ids[1]] - 1 > 0), np.any(t[ids[0] : ids[1]] - 1 < 0)
):
return [tuple(ids)]
else:
zer_loc = np.argmin(np.abs(t[ids[0] : ids[1]] - 1)) + ids[0]
return split_event(t, (ids[0], zer_loc)) + split_event(t, (zer_loc + 1, ids[1]))
def correct_event_signs(t, llocs):
corr_locs = []
for id_idx, ids in enumerate(llocs):
if np.logical_and(
np.any(t[ids[0] : ids[1]] - 1 > 0), np.any(t[ids[0] : ids[1]] - 1 < 0)
):
split_ids = split_event(t, ids)
corr_locs.extend(split_ids)
else:
corr_locs.append(ids)
corr_locs = np.array(corr_locs)
# if we have split into a zero size (due to boundary issue in split events), remove
if np.any((corr_locs[:, 1] - corr_locs[:, 0]) < 1):
corr_locs = corr_locs[(corr_locs[:, 1] - corr_locs[:, 0]) > 0]
return corr_locs
def recursive_split_locs(seq):
# splits a sequence into n adjacent sequences
diff = np.diff(seq)
if not np.any(diff != 1):
return [(seq[0], seq[-1])]
else:
wh = np.where(diff != 1)[0][0] + 1
return recursive_split_locs(seq[:wh]) + recursive_split_locs(seq[wh:])
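# Illustrative usage (added example): recursive_split_locs(np.array([2, 3, 4, 9, 10]))
# returns [(2, 4), (9, 10)] -- one (first, last) tuple per run of consecutive values.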
def detect_events(tc, std, z_score=3, exclude_first=0):
tc_filt = ndimage.gaussian_filter(tc, (0, 3))
std_filt = ndimage.gaussian_filter(std, (0, 3))
tc_filt[:, :exclude_first] = 1
events = np.abs(tc_filt - 1) > z_score * std_filt
# Use closing to join split events and remove small events
struc = np.zeros((3, 5))
struc[1, :] = 1
events = ndimage.binary_opening(events, structure=struc, iterations=2)
events = ndimage.binary_closing(events, structure=struc, iterations=2)
wh = np.where(events)
idxs, locs = np.unique(wh[0], return_index=True)
locs = np.append(locs, len(wh[0]))
result = {}
for i, idx in enumerate(idxs):
llocs = wh[1][locs[i] : locs[i + 1]]
split_locs = np.array(recursive_split_locs(llocs))
# check if they have both positive and negative going - messes with integration later
t = tc_filt[idx, :]
corr_locs = correct_event_signs(t, split_locs)
result[idx] = corr_locs.T
result["tc_filt"] = tc_filt
result["tc"] = tc
return result
def get_event_properties(event_dict, use_filt=True):
if use_filt:
t = event_dict["tc_filt"]
else:
t = event_dict["tc"]
result_dict = {}
for idx in event_dict.keys():
if type(idx) == str:
continue
event_properties = []
for locs in event_dict[idx].T:
if np.logical_and(
np.any(t[idx, locs[0] : locs[1]] - 1 > 0),
np.any(t[idx, locs[0] : locs[1]] - 1 < 0),
):
print(idx, locs)
raise ValueError("This shouldnt happen")
event_length = locs[1] - locs[0]
event_amplitude = (
t[idx, np.argmax(np.abs(t[idx, locs[0] : locs[1]] - 1)) + locs[0]] - 1
)
event_integrated = np.sum(t[idx, locs[0] : locs[1]] - 1)
event_properties.append([event_length, event_amplitude, event_integrated])
if len(np.array(event_properties)) == 0:
pdb.set_trace()
result_dict[idx] = np.array(event_properties)
event_dict["event_props"] = result_dict
return event_dict
def lab2masks(seg):
masks = []
for i in range(1, seg.max() + 1):
masks.append((seg == i).astype(int))
return np.array(masks)
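# Illustrative usage (added example): for a labelled segmentation
# seg = np.array([[0, 1], [2, 2]]), lab2masks(seg) returns a (2, 2, 2) array holding one
# binary mask per label: [[[0, 1], [0, 0]], [[0, 0], [1, 1]]].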
import datetime as dt
from unittest import SkipTest
import numpy as np
from holoviews.core import NdOverlay
from holoviews.core.options import Cycle
from holoviews.core.util import pd
from holoviews.element import Points
from holoviews.streams import Stream
from .testplot import TestBokehPlot, bokeh_renderer
from ..utils import ParamLogStream
try:
from bokeh.models import FactorRange, LinearColorMapper, CategoricalColorMapper
from bokeh.models import Scatter
except:
pass
class TestPointPlot(TestBokehPlot):
def test_points_colormapping(self):
points = Points(np.random.rand(10, 4), vdims=['a', 'b']).opts(plot=dict(color_index=3))
self._test_colormapping(points, 3)
def test_points_colormapping_with_nonselection(self):
opts = dict(plot=dict(color_index=3),
style=dict(nonselection_color='red'))
points = Points(np.random.rand(10, 4), vdims=['a', 'b']).opts(**opts)
self._test_colormapping(points, 3)
def test_points_colormapping_categorical(self):
points = Points([(i, i*2, i*3, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(plot=dict(color_index='b'))
plot = bokeh_renderer.get_plot(points)
plot.initialize_plot()
cmapper = plot.handles['color_mapper']
self.assertIsInstance(cmapper, CategoricalColorMapper)
self.assertEqual(cmapper.factors, list(points['b']))
def test_points_color_selection_nonselection(self):
opts = dict(color='green', selection_color='red', nonselection_color='blue')
points = Points([(i, i*2, i*3, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(style=opts)
plot = bokeh_renderer.get_plot(points)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.fill_color, 'green')
self.assertEqual(glyph_renderer.glyph.line_color, 'green')
self.assertEqual(glyph_renderer.selection_glyph.fill_color, 'red')
self.assertEqual(glyph_renderer.selection_glyph.line_color, 'red')
self.assertEqual(glyph_renderer.nonselection_glyph.fill_color, 'blue')
self.assertEqual(glyph_renderer.nonselection_glyph.line_color, 'blue')
def test_points_alpha_selection_nonselection(self):
opts = dict(alpha=0.8, selection_alpha=1.0, nonselection_alpha=0.2)
points = Points([(i, i*2, i*3, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(style=opts)
plot = bokeh_renderer.get_plot(points)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.fill_alpha, 0.8)
self.assertEqual(glyph_renderer.glyph.line_alpha, 0.8)
self.assertEqual(glyph_renderer.selection_glyph.fill_alpha, 1)
self.assertEqual(glyph_renderer.selection_glyph.line_alpha, 1)
self.assertEqual(glyph_renderer.nonselection_glyph.fill_alpha, 0.2)
self.assertEqual(glyph_renderer.nonselection_glyph.line_alpha, 0.2)
def test_points_alpha_selection_partial(self):
opts = dict(selection_alpha=1.0, selection_fill_alpha=0.2)
points = Points([(i, i*2, i*3, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(style=opts)
plot = bokeh_renderer.get_plot(points)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.fill_alpha, 1.0)
self.assertEqual(glyph_renderer.glyph.line_alpha, 1.0)
self.assertEqual(glyph_renderer.selection_glyph.fill_alpha, 0.2)
self.assertEqual(glyph_renderer.selection_glyph.line_alpha, 1)
def test_batched_points(self):
overlay = NdOverlay({i: Points(np.arange(i)) for i in range(1, 100)})
plot = bokeh_renderer.get_plot(overlay)
extents = plot.get_extents(overlay, {})
self.assertEqual(extents, (0, 0, 98, 98))
def test_batched_points_size_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Points': dict(style=dict(size=Cycle(values=[1, 2])))}
overlay = NdOverlay({i: Points([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
size = np.array([1, 1, 2, 2])
color = np.array(['#30a2da', '#30a2da', '#fc4f30', '#fc4f30'],
dtype='<U7')
self.assertEqual(plot.handles['source'].data['color'], color)
self.assertEqual(plot.handles['source'].data['size'], size)
def test_batched_points_line_color_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Points': dict(style=dict(line_color=Cycle(values=['red', 'blue'])))}
overlay = NdOverlay({i: Points([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
line_color = np.array(['red', 'red', 'blue', 'blue'])
fill_color = np.array(['#30a2da', '#30a2da', '#fc4f30', '#fc4f30'],
dtype='<U7')
self.assertEqual(plot.handles['source'].data['fill_color'], fill_color)
self.assertEqual(plot.handles['source'].data['line_color'], line_color)
def test_batched_points_alpha_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Points': dict(style=dict(alpha=Cycle(values=[0.5, 1])))}
overlay = NdOverlay({i: Points([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
alpha = np.array([0.5, 0.5, 1., 1.])
color = np.array(['#30a2da', '#30a2da', '#fc4f30', '#fc4f30'],
dtype='<U7')
self.assertEqual(plot.handles['source'].data['alpha'], alpha)
self.assertEqual(plot.handles['source'].data['color'], color)
def test_batched_points_line_width_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Points': dict(style=dict(line_width=Cycle(values=[0.5, 1])))}
overlay = NdOverlay({i: Points([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
line_width = np.array([0.5, 0.5, 1., 1.])
color = np.array(['#30a2da', '#30a2da', '#fc4f30', '#fc4f30'],
dtype='<U7')
self.assertEqual(plot.handles['source'].data['line_width'], line_width)
self.assertEqual(plot.handles['source'].data['color'], color)
def test_points_overlay_datetime_hover(self):
if pd is None:
raise SkipTest("Test requires pandas")
obj = NdOverlay({i: Points((list(pd.date_range('2016-01-01', '2016-01-31')), range(31))) for i in range(5)},
kdims=['Test'])
opts = {'Points': {'tools': ['hover']}}
obj = obj.opts(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}'), ('x', '@{x_dt_strings}'), ('y', '@{y}')])
def test_points_overlay_hover_batched(self):
obj = NdOverlay({i: Points(np.random.rand(10,2)) for i in range(5)},
kdims=['Test'])
opts = {'Points': {'tools': ['hover']},
'NdOverlay': {'legend_limit': 0}}
obj = obj.opts(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}'), ('x', '@{x}'), ('y', '@{y}')])
def test_points_overlay_hover(self):
obj = NdOverlay({i: Points(np.random.rand(10,2)) for i in range(5)},
kdims=['Test'])
opts = {'Points': {'tools': ['hover']},
'NdOverlay': {'legend_limit': 0}}
obj = obj.opts(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}'), ('x', '@{x}'),
('y', '@{y}')])
def test_points_no_single_item_legend(self):
points = Points([('A', 1), ('B', 2)], label='A')
plot = bokeh_renderer.get_plot(points)
plot.initialize_plot()
fig = plot.state
self.assertEqual(len(fig.legend), 0)
def test_points_non_numeric_size_warning(self):
data = (np.arange(10), np.arange(10), list(map(chr, range(94,104))))
points = Points(data, vdims=['z']).opts(plot=dict(size_index=2))
with ParamLogStream() as log:
bokeh_renderer.get_plot(points)
log_msg = log.stream.read()
warning = ('z dimension is not numeric, '
'cannot use to scale Points size.\n')
self.assertEqual(log_msg, warning)
def test_points_categorical_xaxis(self):
points = Points((['A', 'B', 'C'], (1,2,3)))
plot = bokeh_renderer.get_plot(points)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C'])
def test_points_categorical_xaxis_mixed_type(self):
points = Points(range(10))
points2 = Points((['A', 'B', 'C', 1, 2.0], (1, 2, 3, 4, 5)))
plot = bokeh_renderer.get_plot(points*points2)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, list(map(str, range(10))) + ['A', 'B', 'C', '2.0'])
def test_points_categorical_xaxis_invert_axes(self):
points = Points((['A', 'B', 'C'], (1,2,3))).opts(plot=dict(invert_axes=True))
plot = bokeh_renderer.get_plot(points)
y_range = plot.handles['y_range']
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C'])
def test_points_overlay_categorical_xaxis(self):
points = Points((['A', 'B', 'C'], (1,2,3)))
points2 = Points((['B', 'C', 'D'], (1,2,3)))
plot = bokeh_renderer.get_plot(points*points2)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C', 'D'])
def test_points_overlay_categorical_xaxis_invert_axis(self):
points = Points((['A', 'B', 'C'], (1,2,3))).opts(plot=dict(invert_xaxis=True))
points2 = Points((['B', 'C', 'D'], (1,2,3)))
plot = bokeh_renderer.get_plot(points*points2)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C', 'D'][::-1])
def test_points_overlay_categorical_xaxis_invert_axes(self):
points = Points((['A', 'B', 'C'], (1,2,3))).opts(plot=dict(invert_axes=True))
points2 = Points((['B', 'C', 'D'], (1,2,3)))
plot = bokeh_renderer.get_plot(points*points2)
y_range = plot.handles['y_range']
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C', 'D'])
def test_points_padding_square(self):
points = Points([1, 2, 3]).options(padding=0.1)
plot = bokeh_renderer.get_plot(points)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, -0.2)
self.assertEqual(x_range.end, 2.2)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_curve_padding_square_per_axis(self):
curve = Points([1, 2, 3]).options(padding=((0, 0.1), (0.1, 0.2)))
plot = bokeh_renderer.get_plot(curve)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0)
self.assertEqual(x_range.end, 2.2)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.4)
def test_points_padding_unequal(self):
points = Points([1, 2, 3]).options(padding=(0.05, 0.1))
plot = bokeh_renderer.get_plot(points)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, -0.1)
self.assertEqual(x_range.end, 2.1)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_points_padding_nonsquare(self):
points = Points([1, 2, 3]).options(padding=0.1, width=600)
plot = bokeh_renderer.get_plot(points)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, -0.1)
self.assertEqual(x_range.end, 2.1)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_points_padding_logx(self):
points = Points([(1, 1), (2, 2), (3,3)]).options(padding=0.1, logx=True)
plot = bokeh_renderer.get_plot(points)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0.89595845984076228)
self.assertEqual(x_range.end, 3.3483695221017129)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_points_padding_logy(self):
points = Points([1, 2, 3]).options(padding=0.1, logy=True)
plot = bokeh_renderer.get_plot(points)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, -0.2)
self.assertEqual(x_range.end, 2.2)
self.assertEqual(y_range.start, 0.89595845984076228)
self.assertEqual(y_range.end, 3.3483695221017129)
def test_points_padding_datetime_square(self):
points = Points([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
padding=0.1
)
plot = bokeh_renderer.get_plot(points)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, np.datetime64('2016-03-31T19:12:00.000000000'))
self.assertEqual(x_range.end, np.datetime64('2016-04-03T04:48:00.000000000'))
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_points_padding_datetime_nonsquare(self):
points = Points([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
padding=0.1, width=600
)
plot = bokeh_renderer.get_plot(points)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, np.datetime64('2016-03-31T21:36:00.000000000'))
self.assertEqual(x_range.end, np.datetime64('2016-04-03T02:24:00.000000000'))
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_points_padding_hard_xrange(self):
points = Points([1, 2, 3]).redim.range(x=(0, 3)).options(padding=0.1)
plot = bokeh_renderer.get_plot(points)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0)
self.assertEqual(x_range.end, 3)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_points_padding_soft_xrange(self):
points = Points([1, 2, 3]).redim.soft_range(x=(0, 3)).options(padding=0.1)
plot = bokeh_renderer.get_plot(points)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0)
self.assertEqual(x_range.end, 3)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_points_datetime_hover(self):
points = Points([(0, 1, dt.datetime(2017, 1, 1))], vdims='date').options(tools=['hover'])
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
self.assertEqual(cds.data['date'].astype('datetime64'), np.array([1483228800000000000]))
self.assertEqual(cds.data['date_dt_strings'], ['2017-01-01 00:00:00'])
hover = plot.handles['hover']
self.assertEqual(hover.tooltips, [('x', '@{x}'), ('y', '@{y}'), ('date', '@{date_dt_strings}')])
def test_points_selected(self):
points = Points([(0, 0), (1, 1), (2, 2)]).opts(selected=[0, 2])
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
self.assertEqual(cds.selected.indices, [0, 2])
def test_points_update_selected(self):
stream = Stream.define('Selected', selected=[])()
points = Points([(0, 0), (1, 1), (2, 2)]).apply.opts(selected=stream.param.selected)
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
self.assertEqual(cds.selected.indices, [])
stream.event(selected=[0, 2])
self.assertEqual(cds.selected.indices, [0, 2])
###########################
# Styling mapping #
###########################
def test_point_color_op(self):
points = Points([(0, 0, '#000'), (0, 1, '#F00'), (0, 2, '#0F0')],
vdims='color').options(color='color')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['color'], np.array(['#000', '#F00', '#0F0']))
self.assertEqual(glyph.fill_color, {'field': 'color'})
self.assertEqual(glyph.line_color, {'field': 'color'})
def test_point_linear_color_op(self):
points = Points([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims='color').options(color='color')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
cmapper = plot.handles['color_color_mapper']
self.assertTrue(cmapper, LinearColorMapper)
self.assertEqual(cmapper.low, 0)
self.assertEqual(cmapper.high, 2)
self.assertEqual(cds.data['color'], np.array([0, 1, 2]))
self.assertEqual(glyph.fill_color, {'field': 'color', 'transform': cmapper})
self.assertEqual(glyph.line_color, {'field': 'color', 'transform': cmapper})
def test_point_categorical_color_op(self):
points = Points([(0, 0, 'A'), (0, 1, 'B'), (0, 2, 'C')],
vdims='color').options(color='color')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
cmapper = plot.handles['color_color_mapper']
self.assertTrue(cmapper, CategoricalColorMapper)
self.assertEqual(cmapper.factors, ['A', 'B', 'C'])
self.assertEqual(cds.data['color'], np.array(['A', 'B', 'C']))
self.assertEqual(glyph.fill_color, {'field': 'color', 'transform': cmapper})
self.assertEqual(glyph.line_color, {'field': 'color', 'transform': cmapper})
def test_point_categorical_dtype_color_op(self):
df = pd.DataFrame(dict(sample_id=['subject 1', 'subject 2', 'subject 3', 'subject 4'], category=['apple', 'pear', 'apple', 'pear'], value=[1, 2, 3, 4]))
df['category'] = df['category'].astype('category')
points = Points(df, ['sample_id', 'value']).opts(color='category')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
cmapper = plot.handles['color_color_mapper']
self.assertTrue(cmapper, CategoricalColorMapper)
self.assertEqual(cmapper.factors, ['apple', 'pear'])
self.assertEqual(np.asarray(cds.data['color']), np.array(['apple', 'pear', 'apple', 'pear']))
self.assertEqual(glyph.fill_color, {'field': 'color', 'transform': cmapper})
self.assertEqual(glyph.line_color, {'field': 'color', 'transform': cmapper})
def test_point_explicit_cmap_color_op(self):
points = Points([(0, 0), (0, 1), (0, 2)]).options(
color='y', cmap={0: 'red', 1: 'green', 2: 'blue'})
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
cmapper = plot.handles['color_color_mapper']
self.assertTrue(cmapper, CategoricalColorMapper)
self.assertEqual(cmapper.factors, ['0', '1', '2'])
self.assertEqual(cmapper.palette, ['red', 'green', 'blue'])
self.assertEqual(cds.data['color_str__'], ['0', '1', '2'])
self.assertEqual(glyph.fill_color, {'field': 'color_str__', 'transform': cmapper})
self.assertEqual(glyph.line_color, {'field': 'color_str__', 'transform': cmapper})
def test_point_line_color_op(self):
points = Points([(0, 0, '#000'), (0, 1, '#F00'), (0, 2, '#0F0')],
vdims='color').options(line_color='color')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['line_color'], np.array(['#000', '#F00', '#0F0']))
self.assertNotEqual(glyph.fill_color, {'field': 'line_color'})
self.assertEqual(glyph.line_color, {'field': 'line_color'})
def test_point_fill_color_op(self):
points = Points([(0, 0, '#000'), (0, 1, '#F00'), (0, 2, '#0F0')],
vdims='color').options(fill_color='color')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['fill_color'], np.array(['#000', '#F00', '#0F0']))
self.assertEqual(glyph.fill_color, {'field': 'fill_color'})
self.assertNotEqual(glyph.line_color, {'field': 'fill_color'})
def test_point_angle_op(self):
points = Points([(0, 0, 0), (0, 1, 45), (0, 2, 90)],
vdims='angle').options(angle='angle')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['angle'], np.array([0, 0.785398, 1.570796]))
self.assertEqual(glyph.angle, {'field': 'angle'})
def test_point_alpha_op(self):
points = Points([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
vdims='alpha').options(alpha='alpha')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['alpha'], np.array([0, 0.2, 0.7]))
self.assertEqual(glyph.fill_alpha, {'field': 'alpha'})
def test_point_line_alpha_op(self):
points = Points([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
vdims='alpha').options(line_alpha='alpha')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['line_alpha'], np.array([0, 0.2, 0.7]))
self.assertEqual(glyph.line_alpha, {'field': 'line_alpha'})
self.assertNotEqual(glyph.fill_alpha, {'field': 'line_alpha'})
def test_point_fill_alpha_op(self):
points = Points([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
vdims='alpha').options(fill_alpha='alpha')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['fill_alpha'], np.array([0, 0.2, 0.7]))
self.assertNotEqual(glyph.line_alpha, {'field': 'fill_alpha'})
self.assertEqual(glyph.fill_alpha, {'field': 'fill_alpha'})
def test_point_size_op(self):
points = Points([(0, 0, 1), (0, 1, 4), (0, 2, 8)],
vdims='size').options(size='size')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['size'], np.array([1, 4, 8]))
self.assertEqual(glyph.size, {'field': 'size'})
def test_point_line_width_op(self):
points = Points([(0, 0, 1), (0, 1, 4), (0, 2, 8)],
vdims='line_width').options(line_width='line_width')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['line_width'], np.array([1, 4, 8]))
self.assertEqual(glyph.line_width, {'field': 'line_width'})
def test_point_marker_op(self):
points = Points([(0, 0, 'circle'), (0, 1, 'triangle'), (0, 2, 'square')],
vdims='marker').options(marker='marker')
plot = bokeh_renderer.get_plot(points)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['marker'], np.array(['circle', 'triangle', 'square']))
import lmfit
import numpy as np
from numpy.linalg import inv
import scipy as sp
import itertools
import matplotlib as mpl
from collections import OrderedDict, defaultdict
from pycqed.utilities import timer as tm_mod
from sklearn.mixture import GaussianMixture as GM
from sklearn.tree import DecisionTreeClassifier as DTC
from pycqed.analysis import fitting_models as fit_mods
from pycqed.analysis import analysis_toolbox as a_tools
import pycqed.analysis_v2.base_analysis as ba
import pycqed.analysis_v2.readout_analysis as roa
from pycqed.analysis_v2.readout_analysis import \
Singleshot_Readout_Analysis_Qutrit as SSROQutrit
import pycqed.analysis_v2.tomography_qudev as tomo
from pycqed.analysis.tools.plotting import SI_val_to_msg_str
from copy import deepcopy
from pycqed.measurement.sweep_points import SweepPoints
from pycqed.measurement.calibration.calibration_points import CalibrationPoints
import matplotlib.pyplot as plt
from pycqed.analysis.three_state_rotation import predict_proba_avg_ro
import logging
from pycqed.utilities import math
from pycqed.utilities.general import find_symmetry_index
import pycqed.measurement.waveform_control.segment as seg_mod
import datetime as dt
log = logging.getLogger(__name__)
try:
import qutip as qtp
except ImportError as e:
log.warning('Could not import qutip, tomography code will not work')
class AveragedTimedomainAnalysis(ba.BaseDataAnalysis):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.single_timestamp = True
self.params_dict = {
'value_names': 'value_names',
'measured_values': 'measured_values',
'measurementstring': 'measurementstring',
'exp_metadata': 'exp_metadata'}
self.numeric_params = []
if kwargs.get('auto', True):
self.run_analysis()
def process_data(self):
self.metadata = self.raw_data_dict.get('exp_metadata', {})
if self.metadata is None:
self.metadata = {}
cal_points = self.metadata.get('cal_points', None)
cal_points = self.options_dict.get('cal_points', cal_points)
cal_points_list = roa.convert_channel_names_to_index(
cal_points, len(self.raw_data_dict['measured_values'][0]),
self.raw_data_dict['value_names'])
self.proc_data_dict['cal_points_list'] = cal_points_list
measured_values = self.raw_data_dict['measured_values']
cal_idxs = self._find_calibration_indices()
scales = [np.std(x[cal_idxs]) for x in measured_values]
observable_vectors = np.zeros((len(cal_points_list),
len(measured_values)))
observable_vector_stds = np.ones_like(observable_vectors)
for i, observable in enumerate(cal_points_list):
for ch_idx, seg_idxs in enumerate(observable):
x = measured_values[ch_idx][seg_idxs] / scales[ch_idx]
if len(x) > 0:
observable_vectors[i][ch_idx] = np.mean(x)
if len(x) > 1:
observable_vector_stds[i][ch_idx] = np.std(x)
Omtx = (observable_vectors[1:] - observable_vectors[0]).T
d0 = observable_vectors[0]
corr_values = np.zeros(
(len(cal_points_list) - 1, len(measured_values[0])))
for i in range(len(measured_values[0])):
d = np.array([x[i] / scale for x, scale in zip(measured_values,
scales)])
corr_values[:, i] = inv(Omtx.T.dot(Omtx)).dot(Omtx.T).dot(d - d0)
self.proc_data_dict['corr_values'] = corr_values
def measurement_operators_and_results(self):
"""
Converts the calibration points to measurement operators. Assumes that
the calibration points are ordered the same as the basis states for
the tomography calculation (e.g. for two qubits |gg>, |ge>, |eg>, |ee>).
Also assumes that each calibration in the passed cal_points uses
different segments.
Returns:
A tuple of
the measured values without the calibration points;
the measurement operators corresponding to each channel;
and the expected covariation matrix between the operators.
"""
d = len(self.proc_data_dict['cal_points_list'])
cal_point_idxs = [set() for _ in range(d)]
for i, idxs_lists in enumerate(self.proc_data_dict['cal_points_list']):
for idxs in idxs_lists:
cal_point_idxs[i].update(idxs)
cal_point_idxs = [sorted(list(idxs)) for idxs in cal_point_idxs]
cal_point_idxs = np.array(cal_point_idxs)
raw_data = self.raw_data_dict['measured_values']
means = [None] * d
residuals = [list() for _ in raw_data]
for i, cal_point_idx in enumerate(cal_point_idxs):
means[i] = [np.mean(ch_data[cal_point_idx]) for ch_data in raw_data]
for j, ch_residuals in enumerate(residuals):
ch_residuals += list(raw_data[j][cal_point_idx] - means[i][j])
means = np.array(means)
residuals = np.array(residuals)
Fs = [np.diag(ms) for ms in means.T]
Omega = residuals.dot(residuals.T) / len(residuals.T)
data_idxs = np.setdiff1d(np.arange(len(raw_data[0])),
cal_point_idxs.flatten())
data = np.array([ch_data[data_idxs] for ch_data in raw_data])
return data, Fs, Omega
def _find_calibration_indices(self):
cal_indices = set()
cal_points = self.options_dict['cal_points']
nr_segments = self.raw_data_dict['measured_values'].shape[-1]
for observable in cal_points:
if isinstance(observable, (list, np.ndarray)):
for idxs in observable:
cal_indices.update({idx % nr_segments for idx in idxs})
else: # assume dictionaries
for idxs in observable.values():
cal_indices.update({idx % nr_segments for idx in idxs})
return list(cal_indices)
def all_cal_points(d, nr_ch, reps=1):
"""
Generates a list of calibration points for a Hilbert space of dimension d,
with nr_ch channels and reps repetitions of each calibration point.
"""
return [[list(range(-reps*i, -reps*(i-1)))]*nr_ch for i in range(d, 0, -1)]
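# Illustrative usage (added example): all_cal_points(2, nr_ch=1, reps=2) evaluates to
# [[[-4, -3]], [[-2, -1]]] -- the first basis state occupies the segments at indices
# -4 and -3, the second the segments at -2 and -1.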
class Single_Qubit_TimeDomainAnalysis(ba.BaseDataAnalysis):
def process_data(self):
"""
This takes care of rotating and normalizing the data if required.
This should work for several input types:
- I/Q values (2 quadratures + cal points)
- weight functions (1 quadrature + cal points)
- counts (no cal points)
There are several options possible to specify the normalization
using the options dict.
cal_points (tuple) of indices of the calibration points
zero_coord, one_coord
"""
cal_points = self.options_dict.get('cal_points', None)
zero_coord = self.options_dict.get('zero_coord', None)
one_coord = self.options_dict.get('one_coord', None)
if cal_points is None:
# default for all standard Timedomain experiments
cal_points = [list(range(-4, -2)), list(range(-2, 0))]
if len(self.raw_data_dict['measured_values']) == 1:
# if only one weight function is used rotation is not required
self.proc_data_dict['corr_data'] = a_tools.rotate_and_normalize_data_1ch(
self.raw_data_dict['measured_values'][0],
cal_zero_points=cal_points[0],
cal_one_points=cal_points[1])
else:
self.proc_data_dict['corr_data'], zero_coord, one_coord = \
a_tools.rotate_and_normalize_data(
data=self.raw_data_dict['measured_values'][0:2],
zero_coord=zero_coord,
one_coord=one_coord,
cal_zero_points=cal_points[0],
cal_one_points=cal_points[1])
# This should be added to the hdf5 datafile but cannot because of the
# way that the "new" analysis works.
# self.add_dataset_to_analysisgroup('Corrected data',
# self.proc_data_dict['corr_data'])
class MultiQubit_TimeDomain_Analysis(ba.BaseDataAnalysis):
"""
Base class for multi-qubit time-domain analyses.
Parameters that can be specified in the options dict:
- rotation_type: type of rotation to be done on the raw data.
Types of rotations supported by this class:
- 'cal_states' (default, no need to specify): rotation based on
CalibrationPoints for 1D and TwoD data. Supports 2 and 3 cal states
per qubit
- 'fixed_cal_points' (only for TwoD, with 2 cal states):
does PCA on the columns corresponding to the highest cal state
to find the indices of that cal state in the columns, then uses
those to get the data points for the other cal state. Does
rotation using the mean of the data points corresponding to the
two cal states as the zero and one coordinates to rotate
the data.
- 'PCA': ignores cal points and does pca; in the case of TwoD data it
does PCA row by row
- 'column_PCA': ignores cal points and does pca; in the case of TwoD data it
does PCA column by column
- 'global_PCA' (only for TwoD): does PCA on the whole 2D array
- main_sp (default: None): dict with keys qb_name used to specify which
sweep parameter should be used as axis label in plot
- functionality to split measurements with tiled sweep_points:
- split_params (default: None): list of strings with sweep parameters
names expected to be found in SweepPoints. Groups data by these
parameters and stores it in proc_data_dict['split_data_dict'].
- select_split (default: None): dict with keys qb_names and values
a tuple (sweep_param_name, value) or (sweep_param_name, index).
Stored in self.measurement_strings which specify the plot title.
The selected parameter must also be part of the split_params for
that qubit.
"""
def __init__(self,
qb_names: list=None, label: str='',
t_start: str=None, t_stop: str=None, data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True,
params_dict=None, numeric_params=None, **kwargs):
super().__init__(t_start=t_start, t_stop=t_stop, label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only,
do_fitting=do_fitting, **kwargs)
self.qb_names = qb_names
self.params_dict = params_dict
if self.params_dict is None:
self.params_dict = {}
self.numeric_params = numeric_params
self.measurement_strings = {}
if self.numeric_params is None:
self.numeric_params = []
if not hasattr(self, "job"):
self.create_job(qb_names=qb_names, t_start=t_start, t_stop=t_stop,
label=label, data_file_path=data_file_path,
do_fitting=do_fitting, options_dict=options_dict,
extract_only=extract_only, params_dict=params_dict,
numeric_params=numeric_params, **kwargs)
if auto:
self.run_analysis()
def extract_data(self):
super().extract_data()
if self.qb_names is None:
self.qb_names = self.get_param_value('ro_qubits')
if self.qb_names is None:
raise ValueError('Provide the "qb_names."')
self.measurement_strings = {
qbn: self.raw_data_dict['measurementstring'] for qbn in
self.qb_names}
self.channel_map = self.get_param_value('meas_obj_value_names_map')
if self.channel_map is None:
# if the new name meas_obj_value_names_map is not found, try with
# the old name channel_map
self.channel_map = self.get_param_value('channel_map')
if self.channel_map is None:
value_names = self.raw_data_dict['value_names']
if np.ndim(value_names) > 0:
value_names = value_names
if 'w' in value_names[0]:
self.channel_map = a_tools.get_qb_channel_map_from_hdf(
self.qb_names, value_names=value_names,
file_path=self.raw_data_dict['folder'])
else:
self.channel_map = {}
for qbn in self.qb_names:
self.channel_map[qbn] = value_names
if len(self.channel_map) == 0:
raise ValueError('No qubit RO channels have been found.')
# creates self.sp
self.get_sweep_points()
def get_sweep_points(self):
self.sp = self.get_param_value('sweep_points')
if self.sp is not None:
self.sp = SweepPoints(self.sp)
def create_sweep_points_dict(self):
sweep_points_dict = self.get_param_value('sweep_points_dict')
hard_sweep_params = self.get_param_value('hard_sweep_params')
if self.sp is not None:
self.mospm = self.get_param_value('meas_obj_sweep_points_map')
main_sp = self.get_param_value('main_sp')
if self.mospm is None:
raise ValueError('When providing "sweep_points", '
'"meas_obj_sweep_points_map" has to be '
'provided in addition.')
if main_sp is not None:
self.proc_data_dict['sweep_points_dict'] = {}
for qbn, p in main_sp.items():
dim = self.sp.find_parameter(p)
if dim == 1:
log.warning(f"main_sp is only implemented for sweep "
f"dimension 0, but {p} is in dimension 1.")
self.proc_data_dict['sweep_points_dict'][qbn] = \
{'sweep_points': self.sp.get_sweep_params_property(
'values', dim, p)}
else:
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': self.sp.get_sweep_params_property(
'values', 0, self.mospm[qbn])[0]}
for qbn in self.qb_names}
elif sweep_points_dict is not None:
# assumed to be of the form {qbn1: swpts_array1, qbn2: swpts_array2}
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': sweep_points_dict[qbn]}
for qbn in self.qb_names}
elif hard_sweep_params is not None:
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': list(hard_sweep_params.values())[0][
'values']} for qbn in self.qb_names}
else:
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': self.data_filter(
self.raw_data_dict['hard_sweep_points'])}
for qbn in self.qb_names}
def create_sweep_points_2D_dict(self):
soft_sweep_params = self.get_param_value('soft_sweep_params')
if self.sp is not None:
self.proc_data_dict['sweep_points_2D_dict'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['sweep_points_2D_dict'][qbn] = \
OrderedDict()
for pn in self.mospm[qbn]:
if pn in self.sp[1]:
self.proc_data_dict['sweep_points_2D_dict'][qbn][
pn] = self.sp[1][pn][0]
elif soft_sweep_params is not None:
self.proc_data_dict['sweep_points_2D_dict'] = \
{qbn: {pn: soft_sweep_params[pn]['values'] for
pn in soft_sweep_params}
for qbn in self.qb_names}
else:
if len(self.raw_data_dict['soft_sweep_points'].shape) == 1:
self.proc_data_dict['sweep_points_2D_dict'] = \
{qbn: {self.raw_data_dict['sweep_parameter_names'][1]:
self.raw_data_dict['soft_sweep_points']} for
qbn in self.qb_names}
else:
sspn = self.raw_data_dict['sweep_parameter_names'][1:]
self.proc_data_dict['sweep_points_2D_dict'] = \
{qbn: {sspn[i]: self.raw_data_dict['soft_sweep_points'][i]
for i in range(len(sspn))} for qbn in self.qb_names}
if self.get_param_value('percentage_done', 100) < 100:
# This indicated an interrupted measurement.
# Remove non-measured sweep points in that case.
# raw_data_dict['soft_sweep_points'] is obtained in
# BaseDataAnalysis.add_measured_data(), and its length should
# always correspond to the actual number of measured soft sweep
# points.
ssl = len(self.raw_data_dict['soft_sweep_points'])
for sps in self.proc_data_dict['sweep_points_2D_dict'].values():
for k, v in sps.items():
sps[k] = v[:ssl]
def create_meas_results_per_qb(self):
measured_RO_channels = list(self.raw_data_dict['measured_data'])
meas_results_per_qb_raw = {}
meas_results_per_qb = {}
for qb_name, RO_channels in self.channel_map.items():
meas_results_per_qb_raw[qb_name] = {}
meas_results_per_qb[qb_name] = {}
if isinstance(RO_channels, str):
meas_ROs_per_qb = [RO_ch for RO_ch in measured_RO_channels
if RO_channels in RO_ch]
for meas_RO in meas_ROs_per_qb:
meas_results_per_qb_raw[qb_name][meas_RO] = \
self.raw_data_dict[
'measured_data'][meas_RO]
meas_results_per_qb[qb_name][meas_RO] = \
self.data_filter(
meas_results_per_qb_raw[qb_name][meas_RO])
elif isinstance(RO_channels, list):
for qb_RO_ch in RO_channels:
meas_ROs_per_qb = [RO_ch for RO_ch in measured_RO_channels
if qb_RO_ch in RO_ch]
for meas_RO in meas_ROs_per_qb:
meas_results_per_qb_raw[qb_name][meas_RO] = \
self.raw_data_dict[
'measured_data'][meas_RO]
meas_results_per_qb[qb_name][meas_RO] = \
self.data_filter(
meas_results_per_qb_raw[qb_name][meas_RO])
else:
raise TypeError('The RO channels for {} must either be a list '
'or a string.'.format(qb_name))
self.proc_data_dict['meas_results_per_qb_raw'] = \
meas_results_per_qb_raw
self.proc_data_dict['meas_results_per_qb'] = \
meas_results_per_qb
def process_data(self):
super().process_data()
self.data_filter = self.get_param_value('data_filter')
prep_params = self.get_param_value('preparation_params',
default_value=dict())
self.data_with_reset = False
if self.data_filter is None:
if 'active' in prep_params.get('preparation_type', 'wait'):
reset_reps = prep_params.get('reset_reps', 1)
self.data_filter = lambda x: x[reset_reps::reset_reps+1]
self.data_with_reset = True
elif "preselection" in prep_params.get('preparation_type', 'wait'):
self.data_filter = lambda x: x[1::2] # filter preselection RO
if self.data_filter is None:
self.data_filter = lambda x: x
self.create_sweep_points_dict()
self.create_meas_results_per_qb()
# temporary fix for appending calibration points to x values but
# without breaking sequences not yet using this interface.
self.rotate = self.get_param_value('rotate', default_value=False)
cal_points = self.get_param_value('cal_points')
last_ge_pulses = self.get_param_value('last_ge_pulses',
default_value=False)
try:
self.cp = CalibrationPoints.from_string(cal_points)
# for now assuming the same for all qubits.
self.cal_states_dict = self.cp.get_indices(
self.qb_names, prep_params)[self.qb_names[0]]
cal_states_rots = self.cp.get_rotations(last_ge_pulses,
self.qb_names[0])[self.qb_names[0]] if self.rotate \
else None
self.cal_states_rotations = self.get_param_value(
'cal_states_rotations', default_value=cal_states_rots)
sweep_points_w_calpts = \
{qbn: {'sweep_points': self.cp.extend_sweep_points(
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'], qbn)} for qbn in self.qb_names}
self.proc_data_dict['sweep_points_dict'] = sweep_points_w_calpts
except TypeError as e:
log.error(e)
log.warning("Failed retrieving cal point objects or states. "
"Please update measurement to provide cal point object "
"in metadata. Trying to get them using the old way ...")
self.cal_states_rotations = self.get_param_value(
'cal_states_rotations', default_value=None) \
if self.rotate else None
self.cal_states_dict = self.get_param_value('cal_states_dict',
default_value={})
if self.get_param_value('global_PCA') is not None:
log.warning('Parameter "global_PCA" is deprecated. Please set '
'rotation_type="global_PCA" instead.')
self.rotation_type = self.get_param_value(
'rotation_type',
default_value='cal_states' if self.rotate else 'no_rotation')
# create projected_data_dict
self.data_to_fit = deepcopy(self.get_param_value('data_to_fit'))
if self.data_to_fit is None:
# If we have cal points, but data_to_fit is not specified,
# choose a reasonable default value. In cases with only two cal
# points, this decides which projected plot is generated. (In
# cases with three cal points, we will anyways get all three
# projected plots.)
if 'e' in self.cal_states_dict.keys():
self.data_to_fit = {qbn: 'pe' for qbn in self.qb_names}
elif 'g' in self.cal_states_dict.keys():
self.data_to_fit = {qbn: 'pg' for qbn in self.qb_names}
else:
self.data_to_fit = {}
# TODO: Steph 15.09.2020
# This is a hack to allow list inside data_to_fit. These lists are
# currently only supported by MultiCZgate_CalibAnalysis
for qbn in self.data_to_fit:
if isinstance(self.data_to_fit[qbn], (list, tuple)):
self.data_to_fit[qbn] = self.data_to_fit[qbn][0]
if self.rotate or self.rotation_type == 'global_PCA':
self.cal_states_analysis()
else:
# this assumes data obtained with classifier detector!
# ie pg, pe, pf are expected to be in the value_names
self.proc_data_dict['projected_data_dict'] = OrderedDict()
for qbn, data_dict in self.proc_data_dict[
'meas_results_per_qb'].items():
self.proc_data_dict['projected_data_dict'][qbn] = OrderedDict()
for state_prob in ['pg', 'pe', 'pf']:
self.proc_data_dict['projected_data_dict'][qbn].update(
{state_prob: data for key, data in data_dict.items()
if state_prob in key})
if self.cal_states_dict is None:
self.cal_states_dict = {}
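        # total number of calibration segments; flattening handles both
        # single indices and lists of indices per cal state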
self.num_cal_points = np.array(list(
self.cal_states_dict.values())).flatten().size
# correct probabilities given calibration matrix
if self.get_param_value("correction_matrix") is not None:
self.proc_data_dict['projected_data_dict_corrected'] = OrderedDict()
for qbn, data_dict in self.proc_data_dict[
'meas_results_per_qb'].items():
                # stack the raw probabilities in the fixed order pg, pe, pf
                # so that the corrected rows can be mapped back to the
                # correct states below
                probas_raw = np.asarray(
                    [data_dict[k] for state_prob in ['pg', 'pe', 'pf']
                     for k in data_dict if state_prob in k])
                corr_mtx = self.get_param_value("correction_matrix")[qbn]
                probas_corrected = np.linalg.inv(corr_mtx).T @ probas_raw
                self.proc_data_dict['projected_data_dict_corrected'][qbn] = \
                    OrderedDict(zip(["pg", "pe", "pf"], probas_corrected))
# get data_to_fit
self.proc_data_dict['data_to_fit'] = OrderedDict()
for qbn, prob_data in self.proc_data_dict[
'projected_data_dict'].items():
if qbn in self.data_to_fit:
self.proc_data_dict['data_to_fit'][qbn] = prob_data[
self.data_to_fit[qbn]]
# create msmt_sweep_points, sweep_points, cal_points_sweep_points
for qbn in self.qb_names:
if self.num_cal_points > 0:
self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'] = \
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][:-self.num_cal_points]
self.proc_data_dict['sweep_points_dict'][qbn][
'cal_points_sweep_points'] = \
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][-self.num_cal_points::]
else:
self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'] = self.proc_data_dict[
'sweep_points_dict'][qbn]['sweep_points']
self.proc_data_dict['sweep_points_dict'][qbn][
'cal_points_sweep_points'] = []
if self.options_dict.get('TwoD', False):
self.create_sweep_points_2D_dict()
# handle data splitting if needed
self.split_data()
def split_data(self):
def unique(l):
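            # np.unique can fail for arrays of arbitrary objects (e.g. when
            # the elements cannot be compared); in that case fall back to
            # comparing the repr() of the elements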
try:
return np.unique(l, return_inverse=True)
except Exception:
h = [repr(a) for a in l]
_, i, j = np.unique(h, return_index=True, return_inverse=True)
return l[i], j
split_params = self.get_param_value('split_params', [])
if not len(split_params):
return
pdd = self.proc_data_dict
pdd['split_data_dict'] = {}
for qbn in self.qb_names:
pdd['split_data_dict'][qbn] = {}
for p in split_params:
dim = self.sp.find_parameter(p)
sv = self.sp.get_sweep_params_property(
'values', param_names=p, dimension=dim)
usp, ind = unique(sv)
if len(usp) <= 1:
continue
svs = [self.sp.subset(ind == i, dim) for i in
range(len(usp))]
[s.remove_sweep_parameter(p) for s in svs]
sdd = {}
pdd['split_data_dict'][qbn][p] = sdd
for i in range(len(usp)):
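                    # mask selecting the measurement points belonging to this
                    # split value; the calibration points are always kept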
subset = (np.concatenate(
[ind == i,
[True] * len(pdd['sweep_points_dict'][qbn][
'cal_points_sweep_points'])]))
sdd[i] = {}
sdd[i]['value'] = usp[i]
sdd[i]['sweep_points'] = svs[i]
d = pdd['sweep_points_dict'][qbn]
if dim == 0:
sdd[i]['sweep_points_dict'] = {
'sweep_points': d['sweep_points'][subset],
'msmt_sweep_points':
d['msmt_sweep_points'][ind == i],
'cal_points_sweep_points':
d['cal_points_sweep_points'],
}
sdd[i]['sweep_points_2D_dict'] = pdd[
'sweep_points_2D_dict'][qbn]
else:
sdd[i]['sweep_points_dict'] = \
pdd['sweep_points_dict'][qbn]
sdd[i]['sweep_points_2D_dict'] = {
k: v[ind == i] for k, v in pdd[
'sweep_points_2D_dict'][qbn].items()}
for d in ['projected_data_dict', 'data_to_fit']:
if isinstance(pdd[d][qbn], dict):
if dim == 0:
sdd[i][d] = {k: v[:, subset] for
k, v in pdd[d][qbn].items()}
else:
sdd[i][d] = {k: v[ind == i, :] for
k, v in pdd[d][qbn].items()}
else:
if dim == 0:
sdd[i][d] = pdd[d][qbn][:, subset]
else:
sdd[i][d] = pdd[d][qbn][ind == i, :]
select_split = self.get_param_value('select_split')
if select_split is not None:
for qbn, select in select_split.items():
p, v = select
                if p not in pdd['split_data_dict'][qbn]:
                    log.warning(f"Split parameter {p} for {qbn} not "
                                f"found. Ignoring this selection.")
                    continue
try:
ind = [a['value'] for a in pdd['split_data_dict'][
qbn][p].values()].index(v)
except ValueError:
ind = v
try:
pdd['split_data_dict'][qbn][p][ind]
                except (KeyError, ValueError):
log.warning(f"Value {v} for split parameter {p} "
f"of {qbn} not found. Ignoring this "
f"selection.")
continue
for d in ['projected_data_dict', 'data_to_fit',
'sweep_points_dict', 'sweep_points_2D_dict']:
pdd[d][qbn] = pdd['split_data_dict'][qbn][p][ind][d]
self.measurement_strings[qbn] += f' ({p}: {v})'
def get_cal_data_points(self):
self.num_cal_points = np.array(list(
self.cal_states_dict.values())).flatten().size
do_PCA = self.rotation_type == 'PCA' or \
self.rotation_type == 'column_PCA'
self.cal_states_dict_for_rotation = OrderedDict()
        cal_states_rotations = self.cal_states_rotations
        # cal_states_rotations is either keyed directly by the state labels
        # (same rotations for all qubits) or keyed by qubit name
        states = any(k in ('g', 'e', 'f') for k in cal_states_rotations)
for qbn in self.qb_names:
self.cal_states_dict_for_rotation[qbn] = OrderedDict()
if states:
cal_states_rot_qb = cal_states_rotations
else:
cal_states_rot_qb = cal_states_rotations[qbn]
for i in range(len(cal_states_rot_qb)):
cal_state = \
[k for k, idx in cal_states_rot_qb.items()
if idx == i][0]
self.cal_states_dict_for_rotation[qbn][cal_state] = \
None if do_PCA and self.num_cal_points != 3 else \
self.cal_states_dict[cal_state]
def cal_states_analysis(self):
self.get_cal_data_points()
self.proc_data_dict['projected_data_dict'] = OrderedDict(
{qbn: '' for qbn in self.qb_names})
for qbn in self.qb_names:
cal_states_dict = self.cal_states_dict_for_rotation[qbn]
if len(cal_states_dict) not in [0, 2, 3]:
raise NotImplementedError('Calibration states rotation is '
'currently only implemented for 0, '
'2, or 3 cal states per qubit.')
data_mostly_g = self.get_param_value('data_mostly_g',
default_value=True)
if self.get_param_value('TwoD', default_value=False):
if self.rotation_type == 'global_PCA':
self.proc_data_dict['projected_data_dict'].update(
self.global_pca_TwoD(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.data_to_fit,
data_mostly_g=data_mostly_g))
elif len(cal_states_dict) == 3:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data_3_cal_states_TwoD(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map,
self.cal_states_dict_for_rotation))
elif self.rotation_type == 'fixed_cal_points':
rotated_data_dict, zero_coord, one_coord = \
self.rotate_data_TwoD_same_fixed_cal_idxs(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.cal_states_dict_for_rotation,
self.data_to_fit)
self.proc_data_dict['projected_data_dict'].update(
rotated_data_dict)
self.proc_data_dict['rotation_coordinates'] = \
[zero_coord, one_coord]
else:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data_TwoD(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.cal_states_dict_for_rotation,
self.data_to_fit, data_mostly_g=data_mostly_g,
column_PCA=self.rotation_type == 'column_PCA'))
else:
if len(cal_states_dict) == 3:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data_3_cal_states(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map,
self.cal_states_dict_for_rotation))
else:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.cal_states_dict_for_rotation,
self.data_to_fit, data_mostly_g=data_mostly_g))
@staticmethod
def rotate_data_3_cal_states(qb_name, meas_results_per_qb, channel_map,
cal_states_dict):
# FOR 3 CAL STATES
rotated_data_dict = OrderedDict()
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict[qb_name] = OrderedDict()
cal_pts_idxs = list(cal_states_dict[qb_name].values())
cal_points_data = np.zeros((len(cal_pts_idxs), 2))
if list(meas_res_dict) == channel_map[qb_name]:
raw_data = np.array([v for v in meas_res_dict.values()]).T
for i, cal_idx in enumerate(cal_pts_idxs):
cal_points_data[i, :] = np.mean(raw_data[cal_idx, :],
axis=0)
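            # assign state probabilities to each averaged-readout point,
            # using the cal-point centroids as reference states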
rotated_data = predict_proba_avg_ro(raw_data, cal_points_data)
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'] = rotated_data[:, i]
else:
raise NotImplementedError('Calibration states rotation with 3 '
'cal states only implemented for '
'2 readout channels per qubit.')
return rotated_data_dict
@staticmethod
def rotate_data(qb_name, meas_results_per_qb, channel_map,
cal_states_dict, data_to_fit, data_mostly_g=True):
# ONLY WORKS FOR 2 CAL STATES
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict = OrderedDict()
if len(cal_states_dict[qb_name]) == 0:
cal_zero_points = None
cal_one_points = None
else:
cal_zero_points = list(cal_states_dict[qb_name].values())[0]
cal_one_points = list(cal_states_dict[qb_name].values())[1]
rotated_data_dict[qb_name] = OrderedDict()
if len(meas_res_dict) == 1:
# one RO channel per qubit
if cal_zero_points is None and cal_one_points is None:
data = meas_res_dict[list(meas_res_dict)[0]]
data = (data - np.min(data))/(np.max(data) - np.min(data))
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]] = data
else:
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
a_tools.rotate_and_normalize_data_1ch(
data=meas_res_dict[list(meas_res_dict)[0]],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
elif list(meas_res_dict) == channel_map[qb_name]:
# two RO channels per qubit
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=np.array([v for v in meas_res_dict.values()]),
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]] = data
else:
# multiple readouts per qubit per channel
if isinstance(channel_map[qb_name], str):
qb_ro_ch0 = channel_map[qb_name]
else:
qb_ro_ch0 = channel_map[qb_name][0]
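            # strip the channel prefix (and the separator character) to get
            # the per-readout suffixes of the measured channels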
ro_suffixes = [s[len(qb_ro_ch0)+1::] for s in
list(meas_res_dict) if qb_ro_ch0 in s]
for i, ro_suf in enumerate(ro_suffixes):
if len(ro_suffixes) == len(meas_res_dict):
# one RO ch per qubit
if cal_zero_points is None and cal_one_points is None:
data = meas_res_dict[list(meas_res_dict)[i]]
data = (data - np.min(data))/(np.max(data) - np.min(data))
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf] = data
else:
rotated_data_dict[qb_name][ro_suf] = \
a_tools.rotate_and_normalize_data_1ch(
data=meas_res_dict[list(meas_res_dict)[i]],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
else:
# two RO ch per qubit
keys = [k for k in meas_res_dict if ro_suf in k]
correct_keys = [k for k in keys
if k[len(qb_ro_ch0)+1::] == ro_suf]
data_array = np.array([meas_res_dict[k]
for k in correct_keys])
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf] = data
return rotated_data_dict
@staticmethod
def rotate_data_3_cal_states_TwoD(qb_name, meas_results_per_qb,
channel_map, cal_states_dict):
# FOR 3 CAL STATES
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict = OrderedDict()
rotated_data_dict[qb_name] = OrderedDict()
cal_pts_idxs = list(cal_states_dict[qb_name].values())
cal_points_data = np.zeros((len(cal_pts_idxs), 2))
if list(meas_res_dict) == channel_map[qb_name]:
# two RO channels per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'] = np.zeros(
raw_data_arr.shape)
for col in range(raw_data_arr.shape[1]):
raw_data = np.concatenate([
v[:, col].reshape(len(v[:, col]), 1) for
v in meas_res_dict.values()], axis=1)
for i, cal_idx in enumerate(cal_pts_idxs):
cal_points_data[i, :] = np.mean(raw_data[cal_idx, :],
axis=0)
# rotated data is (raw_data_arr.shape[0], 3)
rotated_data = predict_proba_avg_ro(
raw_data, cal_points_data)
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'][:, col] = \
rotated_data[:, i]
else:
raise NotImplementedError('Calibration states rotation with 3 '
'cal states only implemented for '
'2 readout channels per qubit.')
# transpose data
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'] = \
rotated_data_dict[qb_name][f'p{state}'].T
return rotated_data_dict
@staticmethod
def global_pca_TwoD(qb_name, meas_results_per_qb, channel_map,
data_to_fit, data_mostly_g=True):
meas_res_dict = meas_results_per_qb[qb_name]
if list(meas_res_dict) != channel_map[qb_name]:
raise NotImplementedError('Global PCA is only implemented '
'for two-channel RO!')
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rotated_data_dict = OrderedDict({qb_name: OrderedDict()})
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
deepcopy(raw_data_arr.transpose())
data_array = np.array(
[v.T.flatten() for v in meas_res_dict.values()])
rot_flat_data, _, _ = \
a_tools.rotate_and_normalize_data_IQ(
data=data_array)
data = np.reshape(rot_flat_data, raw_data_arr.T.shape)
data = a_tools.set_majority_sign(data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]] = data
return rotated_data_dict
@staticmethod
def rotate_data_TwoD(qb_name, meas_results_per_qb, channel_map,
cal_states_dict, data_to_fit,
column_PCA=False, data_mostly_g=True):
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict = OrderedDict()
if len(cal_states_dict[qb_name]) == 0:
cal_zero_points = None
cal_one_points = None
else:
cal_zero_points = list(cal_states_dict[qb_name].values())[0]
cal_one_points = list(cal_states_dict[qb_name].values())[1]
rotated_data_dict[qb_name] = OrderedDict()
if len(meas_res_dict) == 1:
# one RO channel per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
deepcopy(raw_data_arr.transpose())
if column_PCA:
for row in range(raw_data_arr.shape[0]):
data = a_tools.rotate_and_normalize_data_1ch(
data=raw_data_arr[row, :],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]][
:, row] = data
else:
for col in range(raw_data_arr.shape[1]):
data = a_tools.rotate_and_normalize_data_1ch(
data=raw_data_arr[:, col],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]][col] = data
elif list(meas_res_dict) == channel_map[qb_name]:
# two RO channels per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
deepcopy(raw_data_arr.transpose())
if column_PCA:
for row in range(raw_data_arr.shape[0]):
data_array = np.array(
[v[row, :] for v in meas_res_dict.values()])
data, _, _ = \
a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]][
:, row] = data
else:
for col in range(raw_data_arr.shape[1]):
data_array = np.array(
[v[:, col] for v in meas_res_dict.values()])
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][
data_to_fit[qb_name]][col] = data
else:
# multiple readouts per qubit per channel
if isinstance(channel_map[qb_name], str):
qb_ro_ch0 = channel_map[qb_name]
else:
qb_ro_ch0 = channel_map[qb_name][0]
ro_suffixes = [s[len(qb_ro_ch0)+1::] for s in
list(meas_res_dict) if qb_ro_ch0 in s]
for i, ro_suf in enumerate(ro_suffixes):
if len(ro_suffixes) == len(meas_res_dict):
# one RO ch per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[i]]
rotated_data_dict[qb_name][ro_suf] = \
deepcopy(raw_data_arr.transpose())
for col in range(raw_data_arr.shape[1]):
data = a_tools.rotate_and_normalize_data_1ch(
data=raw_data_arr[:, col],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf][col] = data
else:
# two RO ch per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[i]]
rotated_data_dict[qb_name][ro_suf] = \
deepcopy(raw_data_arr.transpose())
for col in range(raw_data_arr.shape[1]):
data_array = np.array(
[v[:, col] for k, v in meas_res_dict.items()
if ro_suf in k])
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf][col] = data
return rotated_data_dict
@staticmethod
def rotate_data_TwoD_same_fixed_cal_idxs(qb_name, meas_results_per_qb,
channel_map, cal_states_dict,
data_to_fit):
meas_res_dict = meas_results_per_qb[qb_name]
if list(meas_res_dict) != channel_map[qb_name]:
raise NotImplementedError('rotate_data_TwoD_same_fixed_cal_idxs '
'only implemented for two-channel RO!')
if len(cal_states_dict[qb_name]) == 0:
cal_zero_points = None
cal_one_points = None
else:
cal_zero_points = list(cal_states_dict[qb_name].values())[0]
cal_one_points = list(cal_states_dict[qb_name].values())[1]
# do pca on the one cal states
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rot_dat_e = np.zeros(raw_data_arr.shape[1])
for row in cal_one_points:
rot_dat_e += a_tools.rotate_and_normalize_data_IQ(
data=np.array([v[row, :] for v in meas_res_dict.values()]),
cal_zero_points=None, cal_one_points=None)[0]
rot_dat_e /= len(cal_one_points)
# find the values of the zero and one cal points
col_idx = np.argmax(np.abs(rot_dat_e))
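        # use the soft-sweep column with the largest |e>-state response as
        # the reference column for extracting the cal-point coordinates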
zero_coord = [np.mean([v[r, col_idx] for r in cal_zero_points])
for v in meas_res_dict.values()]
one_coord = [np.mean([v[r, col_idx] for r in cal_one_points])
for v in meas_res_dict.values()]
# rotate all data based on the fixed zero_coord and one_coord
rotated_data_dict = OrderedDict({qb_name: OrderedDict()})
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
deepcopy(raw_data_arr.transpose())
for col in range(raw_data_arr.shape[1]):
data_array = np.array(
[v[:, col] for v in meas_res_dict.values()])
rotated_data_dict[qb_name][
data_to_fit[qb_name]][col], _, _ = \
a_tools.rotate_and_normalize_data_IQ(
data=data_array,
zero_coord=zero_coord,
one_coord=one_coord)
return rotated_data_dict, zero_coord, one_coord
def get_xaxis_label_unit(self, qb_name):
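        # determine the x-axis label and unit, preferring (in this order)
        # self.sp, hard_sweep_params, sweep_name/sweep_unit, and finally the
        # raw sweep parameter names and units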
hard_sweep_params = self.get_param_value('hard_sweep_params')
sweep_name = self.get_param_value('sweep_name')
sweep_unit = self.get_param_value('sweep_unit')
if self.sp is not None:
main_sp = self.get_param_value('main_sp', None)
if main_sp is not None and qb_name in main_sp:
param_names = [main_sp[qb_name]]
else:
param_names = self.mospm[qb_name]
_, xunit, xlabel = self.sp.get_sweep_params_description(
param_names=param_names, dimension=0)[0]
elif hard_sweep_params is not None:
xlabel = list(hard_sweep_params)[0]
xunit = list(hard_sweep_params.values())[0][
'unit']
elif (sweep_name is not None) and (sweep_unit is not None):
xlabel = sweep_name
xunit = sweep_unit
else:
xlabel = self.raw_data_dict['sweep_parameter_names']
xunit = self.raw_data_dict['sweep_parameter_units']
if np.ndim(xlabel) > 0:
xlabel = xlabel[0]
if np.ndim(xunit) > 0:
xunit = xunit[0]
return xlabel, xunit
@staticmethod
def get_cal_state_color(cal_state_label):
if cal_state_label == 'g' or cal_state_label == r'$|g\rangle$':
return 'k'
elif cal_state_label == 'e' or cal_state_label == r'$|e\rangle$':
return 'gray'
elif cal_state_label == 'f' or cal_state_label == r'$|f\rangle$':
return 'C8'
else:
return 'C4'
@staticmethod
def get_latex_prob_label(prob_label):
if '$' in prob_label:
return prob_label
elif 'p' in prob_label.lower():
return r'$|{}\rangle$'.format(prob_label[-1])
else:
return r'$|{}\rangle$'.format(prob_label)
def prepare_plots(self):
if self.get_param_value('plot_proj_data', default_value=True):
select_split = self.get_param_value('select_split')
fig_name_suffix = self.get_param_value('fig_name_suffix', '')
title_suffix = self.get_param_value('title_suffix', '')
for qb_name, corr_data in self.proc_data_dict[
'projected_data_dict'].items():
fig_name = f'projected_plot_{qb_name}'
title_suf = title_suffix
if select_split is not None:
param, idx = select_split[qb_name]
# remove qb_name from param
p = '_'.join([e for e in param.split('_') if e != qb_name])
# create suffix
suf = f'({p}, {str(np.round(idx, 3))})'
# add suffix
fig_name += f'_{suf}'
title_suf = f'{suf}_{title_suf}' if \
len(title_suf) else suf
if isinstance(corr_data, dict):
for data_key, data in corr_data.items():
if not self.rotate:
data_label = data_key
plot_name_suffix = data_key
plot_cal_points = False
data_axis_label = 'Population'
else:
fn = f'{fig_name}_{data_key}'
data_label = 'Data'
plot_name_suffix = ''
tf = f'{data_key}_{title_suf}' if \
len(title_suf) else data_key
plot_cal_points = (
not self.options_dict.get('TwoD', False))
data_axis_label = \
'Strongest principal component (arb.)' if \
'pca' in self.rotation_type.lower() else \
'{} state population'.format(
self.get_latex_prob_label(data_key))
self.prepare_projected_data_plot(
fn, data, qb_name=qb_name,
data_label=data_label,
title_suffix=tf,
plot_name_suffix=plot_name_suffix,
fig_name_suffix=fig_name_suffix,
data_axis_label=data_axis_label,
plot_cal_points=plot_cal_points)
else:
fig_name = 'projected_plot_' + qb_name
self.prepare_projected_data_plot(
fig_name, corr_data, qb_name=qb_name,
plot_cal_points=(
not self.options_dict.get('TwoD', False)))
if self.get_param_value('plot_raw_data', default_value=True):
self.prepare_raw_data_plots(plot_filtered=False)
if 'preparation_params' in self.metadata:
if 'active' in self.metadata['preparation_params'].get(
'preparation_type', 'wait'):
self.prepare_raw_data_plots(plot_filtered=True)
def prepare_raw_data_plots(self, plot_filtered=False):
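        # plot either the filtered data against the processed sweep points
        # or, for data taken with active reset (and plot_filtered=False), the
        # raw shots against the hardware sweep points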
if plot_filtered or not self.data_with_reset:
key = 'meas_results_per_qb'
suffix = 'filtered' if self.data_with_reset else ''
func_for_swpts = lambda qb_name: self.proc_data_dict[
'sweep_points_dict'][qb_name]['sweep_points']
else:
key = 'meas_results_per_qb_raw'
suffix = ''
func_for_swpts = lambda qb_name: self.raw_data_dict[
'hard_sweep_points']
for qb_name, raw_data_dict in self.proc_data_dict[key].items():
if qb_name not in self.qb_names:
continue
sweep_points = func_for_swpts(qb_name)
if len(raw_data_dict) == 1:
numplotsx = 1
numplotsy = 1
elif len(raw_data_dict) == 2:
numplotsx = 1
numplotsy = 2
else:
numplotsx = 2
numplotsy = len(raw_data_dict) // 2 + len(raw_data_dict) % 2
plotsize = self.get_default_plot_params(set=False)['figure.figsize']
fig_title = (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'] +
'\nRaw data ' + suffix + ' ' + qb_name)
plot_name = 'raw_plot_' + qb_name + suffix
xlabel, xunit = self.get_xaxis_label_unit(qb_name)
for ax_id, ro_channel in enumerate(raw_data_dict):
if self.get_param_value('TwoD', default_value=False):
if self.sp is None:
soft_sweep_params = self.get_param_value(
'soft_sweep_params')
if soft_sweep_params is not None:
yunit = list(soft_sweep_params.values())[0]['unit']
else:
yunit = self.raw_data_dict[
'sweep_parameter_units'][1]
if np.ndim(yunit) > 0:
yunit = yunit[0]
for pn, ssp in self.proc_data_dict['sweep_points_2D_dict'][
qb_name].items():
ylabel = pn
if self.sp is not None:
yunit = self.sp.get_sweep_params_property(
'unit', dimension=1, param_names=pn)
ylabel = self.sp.get_sweep_params_property(
'label', dimension=1, param_names=pn)
self.plot_dicts[f'{plot_name}_{ro_channel}_{pn}'] = {
'fig_id': plot_name + '_' + pn,
'ax_id': ax_id,
'plotfn': self.plot_colorxy,
'xvals': sweep_points,
'yvals': ssp,
'zvals': raw_data_dict[ro_channel].T,
'xlabel': xlabel,
'xunit': xunit,
'ylabel': ylabel,
'yunit': yunit,
'numplotsx': numplotsx,
'numplotsy': numplotsy,
'plotsize': (plotsize[0]*numplotsx,
plotsize[1]*numplotsy),
'title': fig_title,
'clabel': '{} (Vpeak)'.format(ro_channel)}
else:
self.plot_dicts[plot_name + '_' + ro_channel] = {
'fig_id': plot_name,
'ax_id': ax_id,
'plotfn': self.plot_line,
'xvals': sweep_points,
'xlabel': xlabel,
'xunit': xunit,
'yvals': raw_data_dict[ro_channel],
'ylabel': '{} (Vpeak)'.format(ro_channel),
'yunit': '',
'numplotsx': numplotsx,
'numplotsy': numplotsy,
'plotsize': (plotsize[0]*numplotsx,
plotsize[1]*numplotsy),
'title': fig_title}
if len(raw_data_dict) == 1:
self.plot_dicts[
plot_name + '_' + list(raw_data_dict)[0]]['ax_id'] = None
def prepare_projected_data_plot(
self, fig_name, data, qb_name, title_suffix='', sweep_points=None,
plot_cal_points=True, plot_name_suffix='', fig_name_suffix='',
data_label='Data', data_axis_label='', do_legend_data=True,
do_legend_cal_states=True):
if len(fig_name_suffix):
fig_name = f'{fig_name}_{fig_name_suffix}'
if data_axis_label == '':
data_axis_label = 'Strongest principal component (arb.)' if \
'pca' in self.rotation_type.lower() else \
'{} state population'.format(self.get_latex_prob_label(
self.data_to_fit[qb_name]))
plotsize = self.get_default_plot_params(set=False)['figure.figsize']
plotsize = (plotsize[0], plotsize[0]/1.25)
if sweep_points is None:
sweep_points = self.proc_data_dict['sweep_points_dict'][qb_name][
'sweep_points']
plot_names_cal = []
if plot_cal_points and self.num_cal_points != 0:
yvals = data[:-self.num_cal_points]
xvals = sweep_points[:-self.num_cal_points]
# plot cal points
for i, cal_pts_idxs in enumerate(
self.cal_states_dict.values()):
plot_dict_name_cal = fig_name + '_' + \
list(self.cal_states_dict)[i] + '_' + \
plot_name_suffix
plot_names_cal += [plot_dict_name_cal]
self.plot_dicts[plot_dict_name_cal] = {
'fig_id': fig_name,
'plotfn': self.plot_line,
'plotsize': plotsize,
'xvals': self.proc_data_dict['sweep_points_dict'][qb_name][
'cal_points_sweep_points'][cal_pts_idxs],
'yvals': data[cal_pts_idxs],
'setlabel': list(self.cal_states_dict)[i],
'do_legend': do_legend_cal_states,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'linestyle': 'none',
'line_kws': {'color': self.get_cal_state_color(
list(self.cal_states_dict)[i])}}
self.plot_dicts[plot_dict_name_cal+'_line'] = {
'fig_id': fig_name,
'plotsize': plotsize,
'plotfn': self.plot_hlines,
'y': np.mean(data[cal_pts_idxs]),
'xmin': self.proc_data_dict['sweep_points_dict'][qb_name][
'sweep_points'][0],
'xmax': self.proc_data_dict['sweep_points_dict'][qb_name][
'sweep_points'][-1],
'colors': 'gray'}
else:
yvals = data
xvals = sweep_points
title = (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'])
title += '\n' + f'{qb_name}_{title_suffix}' if len(title_suffix) else \
' ' + qb_name
plot_dict_name = f'{fig_name}_{plot_name_suffix}'
xlabel, xunit = self.get_xaxis_label_unit(qb_name)
if self.get_param_value('TwoD', default_value=False):
if self.sp is None:
soft_sweep_params = self.get_param_value(
'soft_sweep_params')
if soft_sweep_params is not None:
yunit = list(soft_sweep_params.values())[0]['unit']
else:
yunit = self.raw_data_dict['sweep_parameter_units'][1]
if np.ndim(yunit) > 0:
yunit = yunit[0]
for pn, ssp in self.proc_data_dict['sweep_points_2D_dict'][
qb_name].items():
ylabel = pn
if self.sp is not None:
yunit = self.sp.get_sweep_params_property(
'unit', dimension=1, param_names=pn)
ylabel = self.sp.get_sweep_params_property(
'label', dimension=1, param_names=pn)
self.plot_dicts[f'{plot_dict_name}_{pn}'] = {
'plotfn': self.plot_colorxy,
'fig_id': fig_name + '_' + pn,
'xvals': xvals,
'yvals': ssp,
'zvals': yvals,
'xlabel': xlabel,
'xunit': xunit,
'ylabel': ylabel,
'yunit': yunit,
'zrange': self.get_param_value('zrange', None),
'title': title,
'clabel': data_axis_label}
else:
self.plot_dicts[plot_dict_name] = {
'plotfn': self.plot_line,
'fig_id': fig_name,
'plotsize': plotsize,
'xvals': xvals,
'xlabel': xlabel,
'xunit': xunit,
'yvals': yvals,
'ylabel': data_axis_label,
'yunit': '',
'setlabel': data_label,
'title': title,
'linestyle': 'none',
'do_legend': do_legend_data,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
# add plot_params to each plot dict
plot_params = self.get_param_value('plot_params', default_value={})
for plt_name in self.plot_dicts:
self.plot_dicts[plt_name].update(plot_params)
if len(plot_names_cal) > 0:
if do_legend_data and not do_legend_cal_states:
for plot_name in plot_names_cal:
plot_dict_cal = self.plot_dicts.pop(plot_name)
self.plot_dicts[plot_name] = plot_dict_cal
class Idling_Error_Rate_Analyisis(ba.BaseDataAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
post_sel_th = self.options_dict.get('post_sel_th', 0.5)
raw_shots = self.raw_data_dict['measured_values'][0][0]
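        # shots alternate between a post-selection readout and a data
        # readout; discard (set to NaN) data shots whose preceding
        # post-selection readout exceeded the threshold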
post_sel_shots = raw_shots[::2]
data_shots = raw_shots[1::2]
data_shots[np.where(post_sel_shots > post_sel_th)] = np.nan
states = ['0', '1', '+']
self.proc_data_dict['xvals'] = np.unique(self.raw_data_dict['xvals'])
for i, state in enumerate(states):
            self.proc_data_dict['shots_{}'.format(state)] = data_shots[i::3]
self.proc_data_dict['yvals_{}'.format(state)] = \
np.nanmean(np.reshape(self.proc_data_dict['shots_{}'.format(state)],
(len(self.proc_data_dict['xvals']), -1),
order='F'), axis=1)
def prepare_plots(self):
# assumes that value names are unique in an experiment
states = ['0', '1', '+']
for i, state in enumerate(states):
yvals = self.proc_data_dict['yvals_{}'.format(state)]
xvals = self.proc_data_dict['xvals']
self.plot_dicts['Prepare in {}'.format(state)] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': xvals,
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': yvals,
'ylabel': 'Counts',
'yrange': [0, 1],
'xrange': self.options_dict.get('xrange', None),
'yunit': 'frac',
'setlabel': 'Prepare in {}'.format(state),
                'do_legend': True,
'title': (self.raw_data_dict['timestamps'][0]+' - ' +
self.raw_data_dict['timestamps'][-1] + '\n' +
self.raw_data_dict['measurementstring'][0]),
'legend_pos': 'upper right'}
if self.do_fitting:
for state in ['0', '1', '+']:
self.plot_dicts['fit_{}'.format(state)] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['fit {}'.format(state)]['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'fit |{}>'.format(state),
'do_legend': True,
'legend_pos': 'upper right'}
            self.plot_dicts['fit_text'] = {
                'ax_id': 'main',
                'box_props': 'fancy',
                'xpos': 1.05,
                'horizontalalignment': 'left',
                'plotfn': self.plot_text,
                'text_string': self.proc_data_dict['fit_msg']}
def analyze_fit_results(self):
        fit_msg = ''
states = ['0', '1', '+']
for state in states:
fr = self.fit_res['fit {}'.format(state)]
N1 = fr.params['N1'].value, fr.params['N1'].stderr
N2 = fr.params['N2'].value, fr.params['N2'].stderr
            fit_msg += ('Prep |{}> : \n\tN_1 = {:.2g} $\\pm$ {:.2g}'
                        '\n\tN_2 = {:.2g} $\\pm$ {:.2g}\n').format(
                state, N1[0], N1[1], N2[0], N2[1])
self.proc_data_dict['fit_msg'] = fit_msg
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
states = ['0', '1', '+']
for i, state in enumerate(states):
yvals = self.proc_data_dict['yvals_{}'.format(state)]
xvals = self.proc_data_dict['xvals']
mod = lmfit.Model(fit_mods.idle_error_rate_exp_decay)
mod.guess = fit_mods.idle_err_rate_guess.__get__(mod, mod.__class__)
# Done here explicitly so that I can overwrite a specific guess
guess_pars = mod.guess(N=xvals, data=yvals)
vary_N2 = self.options_dict.get('vary_N2', True)
if not vary_N2:
guess_pars['N2'].value = 1e21
guess_pars['N2'].vary = False
self.fit_dicts['fit {}'.format(states[i])] = {
'model': mod,
'fit_xvals': {'N': xvals},
'fit_yvals': {'data': yvals},
'guess_pars': guess_pars}
# Allows fixing the double exponential coefficient
class Grovers_TwoQubitAllStates_Analysis(ba.BaseDataAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
normalize_to_cal_points = self.options_dict.get('normalize_to_cal_points', True)
cal_points = [
[[-4, -3], [-2, -1]],
[[-4, -2], [-3, -1]],
]
for idx in [0,1]:
yvals = list(self.raw_data_dict['measured_data'].values())[idx][0]
self.proc_data_dict['ylabel_{}'.format(idx)] = \
self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
if normalize_to_cal_points:
yvals = a_tools.rotate_and_normalize_data_1ch(yvals,
cal_zero_points=cal_points[idx][0],
cal_one_points=cal_points[idx][1])
self.proc_data_dict['yvals_{}'.format(idx)] = yvals
y0 = self.proc_data_dict['yvals_0']
y1 = self.proc_data_dict['yvals_1']
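        # success probability averaged over the four two-qubit input states,
        # comparing each measured outcome against the expected target state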
p_success = ((y0[0]*y1[0]) +
(1-y0[1])*y1[1] +
(y0[2])*(1-y1[2]) +
(1-y0[3])*(1-y1[3]) )/4
self.proc_data_dict['p_success'] = p_success
def prepare_plots(self):
# assumes that value names are unique in an experiment
for i in [0, 1]:
yvals = self.proc_data_dict['yvals_{}'.format(i)]
xvals = self.raw_data_dict['xvals'][0]
ylabel = self.proc_data_dict['ylabel_{}'.format(i)]
self.plot_dicts['main_{}'.format(ylabel)] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['xvals'][0],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_{}'.format(i)],
'ylabel': ylabel,
'yunit': self.proc_data_dict['yunit'],
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': False,
'legend_pos': 'upper right'}
        self.plot_dicts['limit_text'] = {
            'ax_id': 'main_{}'.format(ylabel),
            'box_props': 'fancy',
            'xpos': 1.05,
            'horizontalalignment': 'left',
            'plotfn': self.plot_text,
            'text_string': 'P success = {:.3f}'.format(
                self.proc_data_dict['p_success'])}
class FlippingAnalysis(Single_Qubit_TimeDomainAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = True
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'measurementstring': 'measurementstring',
'sweep_points': 'sweep_points',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
# This analysis makes a hardcoded assumption on the calibration points
self.options_dict['cal_points'] = [list(range(-4, -2)),
list(range(-2, 0))]
self.numeric_params = []
if auto:
self.run_analysis()
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
# Even though we expect an exponentially damped oscillation we use
# a simple cosine as this gives more reliable fitting and we are only
# interested in extracting the frequency of the oscillation
cos_mod = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.raw_data_dict['sweep_points'][:-4],
data=self.proc_data_dict['corr_data'][:-4])
# This enforces the oscillation to start at the equator
# and ensures that any over/under rotation is absorbed in the
# frequency
guess_pars['amplitude'].value = 0.5
guess_pars['amplitude'].vary = False
guess_pars['offset'].value = 0.5
guess_pars['offset'].vary = False
self.fit_dicts['cos_fit'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.raw_data_dict['sweep_points'][:-4]},
'fit_yvals': {'data': self.proc_data_dict['corr_data'][:-4]},
'guess_pars': guess_pars}
# In the case there are very few periods we fall back on a small
# angle approximation to extract the drive detuning
poly_mod = lmfit.models.PolynomialModel(degree=1)
# the detuning can be estimated using on a small angle approximation
# c1 = d/dN (cos(2*pi*f N) ) evaluated at N = 0 -> c1 = -2*pi*f
poly_mod.set_param_hint('frequency', expr='-c1/(2*pi)')
guess_pars = poly_mod.guess(x=self.raw_data_dict['sweep_points'][:-4],
data=self.proc_data_dict['corr_data'][:-4])
# Constraining the line ensures that it will only give a good fit
# if the small angle approximation holds
guess_pars['c0'].vary = False
guess_pars['c0'].value = 0.5
self.fit_dicts['line_fit'] = {
'model': poly_mod,
'fit_xvals': {'x': self.raw_data_dict['sweep_points'][:-4]},
'fit_yvals': {'data': self.proc_data_dict['corr_data'][:-4]},
'guess_pars': guess_pars}
def analyze_fit_results(self):
sf_line = self._get_scale_factor_line()
sf_cos = self._get_scale_factor_cos()
self.proc_data_dict['scale_factor'] = self.get_scale_factor()
msg = 'Scale fact. based on '
if self.proc_data_dict['scale_factor'] == sf_cos:
msg += 'cos fit\n'
else:
msg += 'line fit\n'
msg += 'cos fit: {:.4f}\n'.format(sf_cos)
msg += 'line fit: {:.4f}'.format(sf_line)
self.raw_data_dict['scale_factor_msg'] = msg
# TODO: save scale factor to file
def get_scale_factor(self):
"""
Returns the scale factor that should correct for the error in the
pulse amplitude.
"""
# Model selection based on the Bayesian Information Criterion (BIC)
# as calculated by lmfit
if (self.fit_dicts['line_fit']['fit_res'].bic <
self.fit_dicts['cos_fit']['fit_res'].bic):
scale_factor = self._get_scale_factor_line()
else:
scale_factor = self._get_scale_factor_cos()
return scale_factor
def _get_scale_factor_cos(self):
# 1/period of the oscillation corresponds to the (fractional)
# over/under rotation error per gate
frequency = self.fit_dicts['cos_fit']['fit_res'].params['frequency']
# the square is needed to account for the difference between
# power and amplitude
scale_factor = (1+frequency)**2
phase = np.rad2deg(self.fit_dicts['cos_fit']['fit_res'].params['phase']) % 360
# phase ~90 indicates an under rotation so the scale factor
# has to be larger than 1. A phase ~270 indicates an over
# rotation so then the scale factor has to be smaller than one.
if phase > 180:
scale_factor = 1/scale_factor
return scale_factor
def _get_scale_factor_line(self):
# 1/period of the oscillation corresponds to the (fractional)
# over/under rotation error per gate
frequency = self.fit_dicts['line_fit']['fit_res'].params['frequency']
scale_factor = (1+frequency)**2
# no phase sign check is needed here as this is contained in the
# sign of the coefficient
return scale_factor
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['sweep_points'],
'xlabel': self.raw_data_dict['xlabel'],
'xunit': self.raw_data_dict['xunit'], # does not do anything yet
'yvals': self.proc_data_dict['corr_data'],
'ylabel': 'Excited state population',
'yunit': '',
'setlabel': 'data',
'title': (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring']),
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'line fit',
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['cos_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['text_msg'] = {
'ax_id': 'main',
'ypos': 0.15,
'plotfn': self.plot_text,
'box_props': 'fancy',
'text_string': self.raw_data_dict['scale_factor_msg']}
class Intersect_Analysis(Single_Qubit_TimeDomainAnalysis):
"""
Analysis to extract the intercept of two parameters.
relevant options_dict parameters
ch_idx_A (int) specifies first channel for intercept
ch_idx_B (int) specifies second channel for intercept if same as first
it will assume data was taken interleaved.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xvals': 'sweep_points',
'xunit': 'sweep_unit',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx_A" and "ch_idx_B"
specified in the options dict. If ch_idx_A and ch_idx_B are the same
it will unzip the data.
"""
self.proc_data_dict = deepcopy(self.raw_data_dict)
# The channel containing the data must be specified in the options dict
ch_idx_A = self.options_dict.get('ch_idx_A', 0)
ch_idx_B = self.options_dict.get('ch_idx_B', 0)
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][ch_idx_A]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][ch_idx_A]
if ch_idx_A == ch_idx_B:
yvals = list(self.raw_data_dict['measured_data'].values())[ch_idx_A][0]
self.proc_data_dict['xvals_A'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_B'] = self.raw_data_dict['xvals'][0][1::2]
self.proc_data_dict['yvals_A'] = yvals[::2]
self.proc_data_dict['yvals_B'] = yvals[1::2]
else:
self.proc_data_dict['xvals_A'] = self.raw_data_dict['xvals'][0]
self.proc_data_dict['xvals_B'] = self.raw_data_dict['xvals'][0]
self.proc_data_dict['yvals_A'] = list(self.raw_data_dict
['measured_data'].values())[ch_idx_A][0]
self.proc_data_dict['yvals_B'] = list(self.raw_data_dict
['measured_data'].values())[ch_idx_B][0]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
self.fit_dicts['line_fit_A'] = {
'model': lmfit.models.PolynomialModel(degree=2),
'fit_xvals': {'x': self.proc_data_dict['xvals_A']},
'fit_yvals': {'data': self.proc_data_dict['yvals_A']}}
self.fit_dicts['line_fit_B'] = {
'model': lmfit.models.PolynomialModel(degree=2),
'fit_xvals': {'x': self.proc_data_dict['xvals_B']},
'fit_yvals': {'data': self.proc_data_dict['yvals_B']}}
def analyze_fit_results(self):
fr_0 = self.fit_res['line_fit_A'].best_values
fr_1 = self.fit_res['line_fit_B'].best_values
c0 = (fr_0['c0'] - fr_1['c0'])
c1 = (fr_0['c1'] - fr_1['c1'])
c2 = (fr_0['c2'] - fr_1['c2'])
poly_coeff = [c0, c1, c2]
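        # the roots of the difference polynomial are the x values at which
        # the two quadratic fits intersect; fit A is evaluated there to get
        # the corresponding y values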
poly = np.polynomial.polynomial.Polynomial([fr_0['c0'],
fr_0['c1'], fr_0['c2']])
ic = np.polynomial.polynomial.polyroots(poly_coeff)
self.proc_data_dict['intersect_L'] = ic[0], poly(ic[0])
self.proc_data_dict['intersect_R'] = ic[1], poly(ic[1])
        if (np.min(self.proc_data_dict['xvals']) < ic[0] <
                np.max(self.proc_data_dict['xvals'])):
            self.proc_data_dict['intersect'] = \
                self.proc_data_dict['intersect_L']
        else:
            self.proc_data_dict['intersect'] = \
                self.proc_data_dict['intersect_R']
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_A'],
'xlabel': self.proc_data_dict['xlabel'][0],
'xunit': self.proc_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_A'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'A',
'title': (self.proc_data_dict['timestamps'][0] + ' \n' +
self.proc_data_dict['measurementstring'][0]),
'do_legend': True,
'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_B'],
'xlabel': self.proc_data_dict['xlabel'][0],
'xunit': self.proc_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_B'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'B',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit_A'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_A']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit A',
'do_legend': True}
self.plot_dicts['line_fit_B'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_B']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit B',
'do_legend': True}
ic, ic_unit = SI_val_to_msg_str(
self.proc_data_dict['intersect'][0],
self.proc_data_dict['xunit'][0][0], return_type=float)
self.plot_dicts['intercept_message'] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': [self.proc_data_dict['intersect'][0]],
'yvals': [self.proc_data_dict['intersect'][1]],
                'line_kws': {'alpha': .5, 'color': 'gray',
                             'markersize': 15},
'marker': 'o',
'setlabel': 'Intercept: {:.1f} {}'.format(ic, ic_unit),
'do_legend': True}
def get_intersect(self):
return self.proc_data_dict['intersect']
class CZ_1QPhaseCal_Analysis(ba.BaseDataAnalysis):
"""
Analysis to extract the intercept for a single qubit phase calibration
experiment
N.B. this is a less generic version of "Intersect_Analysis" and should
be deprecated (MAR Dec 2017)
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx" in options dict and
then splits the data for th
"""
self.proc_data_dict = OrderedDict()
# The channel containing the data must be specified in the options dict
ch_idx = self.options_dict['ch_idx']
yvals = list(self.raw_data_dict['measured_data'].values())[ch_idx][0]
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][ch_idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][ch_idx]
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
self.proc_data_dict['yvals_off'] = yvals[::2]
self.proc_data_dict['yvals_on'] = yvals[1::2]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
self.fit_dicts['line_fit_off'] = {
'model': lmfit.models.PolynomialModel(degree=1),
'fit_xvals': {'x': self.proc_data_dict['xvals_off']},
'fit_yvals': {'data': self.proc_data_dict['yvals_off']}}
self.fit_dicts['line_fit_on'] = {
'model': lmfit.models.PolynomialModel(degree=1),
'fit_xvals': {'x': self.proc_data_dict['xvals_on']},
'fit_yvals': {'data': self.proc_data_dict['yvals_on']}}
def analyze_fit_results(self):
fr_0 = self.fit_res['line_fit_off'].best_values
fr_1 = self.fit_res['line_fit_on'].best_values
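        # intersection of the two linear fits: solve
        # c0_off + c1_off * x = c0_on + c1_on * x for x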
ic = -(fr_0['c0'] - fr_1['c0'])/(fr_0['c1'] - fr_1['c1'])
self.proc_data_dict['zero_phase_diff_intersect'] = ic
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_off'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_on'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit_off'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_off']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ off',
'do_legend': True}
self.plot_dicts['line_fit_on'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_on']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ on',
'do_legend': True}
ic, ic_unit = SI_val_to_msg_str(
self.proc_data_dict['zero_phase_diff_intersect'],
self.raw_data_dict['xunit'][0][0], return_type=float)
self.plot_dicts['intercept_message'] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': [self.proc_data_dict['zero_phase_diff_intersect']],
'yvals': [np.mean(self.proc_data_dict['xvals_on'])],
'line_kws': {'alpha': 0},
'setlabel': 'Intercept: {:.1f} {}'.format(ic, ic_unit),
'do_legend': True}
def get_zero_phase_diff_intersect(self):
return self.proc_data_dict['zero_phase_diff_intersect']
class Oscillation_Analysis(ba.BaseDataAnalysis):
"""
Very basic analysis to determine the phase of a single oscillation
that has an assumed period of 360 degrees.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
label: str='',
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
idx = 1
self.proc_data_dict['yvals'] = list(self.raw_data_dict['measured_data'].values())[idx][0]
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.raw_data_dict['xvals'][0],
data=self.proc_data_dict['yvals'], freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts['cos_fit'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.raw_data_dict['xvals'][0]},
'fit_yvals': {'data': self.proc_data_dict['yvals']},
'guess_pars': guess_pars}
def analyze_fit_results(self):
fr = self.fit_res['cos_fit'].best_values
self.proc_data_dict['phi'] = np.rad2deg(fr['phase'])
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['xvals'][0],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['cos_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit',
'do_legend': True}
class Conditional_Oscillation_Analysis(ba.BaseDataAnalysis):
"""
Analysis to extract quantities from a conditional oscillation.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
label: str='',
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx_osc" and
"ch_idx_spec" in the options dict and then splits the data for the
off and on cases
"""
self.proc_data_dict = OrderedDict()
# The channel containing the data must be specified in the options dict
ch_idx_spec = self.options_dict.get('ch_idx_spec', 0)
ch_idx_osc = self.options_dict.get('ch_idx_osc', 1)
normalize_to_cal_points = self.options_dict.get('normalize_to_cal_points', True)
cal_points = [
[[-4, -3], [-2, -1]],
[[-4, -2], [-3, -1]],
]
i = 0
for idx, type_str in zip([ch_idx_osc, ch_idx_spec], ['osc', 'spec']):
yvals = list(self.raw_data_dict['measured_data'].values())[idx][0]
self.proc_data_dict['ylabel_{}'.format(type_str)] = self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
if normalize_to_cal_points:
yvals = a_tools.rotate_and_normalize_data_1ch(yvals,
cal_zero_points=cal_points[i][0],
cal_one_points=cal_points[i][1])
                i += 1
self.proc_data_dict['yvals_{}_off'.format(type_str)] = yvals[::2]
self.proc_data_dict['yvals_{}_on'.format(type_str)] = yvals[1::2]
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
else:
self.proc_data_dict['yvals_{}_off'.format(type_str)] = yvals[::2]
self.proc_data_dict['yvals_{}_on'.format(type_str)] = yvals[1::2]
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.proc_data_dict['xvals_off'][:-2],
data=self.proc_data_dict['yvals_osc_off'][:-2],
freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts['cos_fit_off'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.proc_data_dict['xvals_off'][:-2]},
'fit_yvals': {'data': self.proc_data_dict['yvals_osc_off'][:-2]},
'guess_pars': guess_pars}
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.proc_data_dict['xvals_on'][:-2],
data=self.proc_data_dict['yvals_osc_on'][:-2],
freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts['cos_fit_on'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.proc_data_dict['xvals_on'][:-2]},
'fit_yvals': {'data': self.proc_data_dict['yvals_osc_on'][:-2]},
'guess_pars': guess_pars}
def analyze_fit_results(self):
fr_0 = self.fit_res['cos_fit_off'].params
fr_1 = self.fit_res['cos_fit_on'].params
phi0 = np.rad2deg(fr_0['phase'].value)
phi1 = np.rad2deg(fr_1['phase'].value)
phi0_stderr = np.rad2deg(fr_0['phase'].stderr)
phi1_stderr = np.rad2deg(fr_1['phase'].stderr)
self.proc_data_dict['phi_0'] = phi0, phi0_stderr
self.proc_data_dict['phi_1'] = phi1, phi1_stderr
phi_cond_stderr = (phi0_stderr**2+phi1_stderr**2)**.5
self.proc_data_dict['phi_cond'] = (phi1 -phi0), phi_cond_stderr
osc_amp = np.mean([fr_0['amplitude'], fr_1['amplitude']])
        osc_amp_stderr = np.sqrt(fr_0['amplitude'].stderr**2 +
                                 fr_1['amplitude'].stderr**2)/2
self.proc_data_dict['osc_amp_0'] = (fr_0['amplitude'].value,
fr_0['amplitude'].stderr)
self.proc_data_dict['osc_amp_1'] = (fr_1['amplitude'].value,
fr_1['amplitude'].stderr)
self.proc_data_dict['osc_offs_0'] = (fr_0['offset'].value,
fr_0['offset'].stderr)
self.proc_data_dict['osc_offs_1'] = (fr_1['offset'].value,
fr_1['offset'].stderr)
offs_stderr = (fr_0['offset'].stderr**2+fr_1['offset'].stderr**2)**.5
self.proc_data_dict['offs_diff'] = (
fr_1['offset'].value - fr_0['offset'].value, offs_stderr)
# self.proc_data_dict['osc_amp'] = (osc_amp, osc_amp_stderr)
self.proc_data_dict['missing_fraction'] = (
np.mean(self.proc_data_dict['yvals_spec_on'][:-2]) -
np.mean(self.proc_data_dict['yvals_spec_off'][:-2]))
def prepare_plots(self):
self._prepare_main_oscillation_figure()
self._prepare_spectator_qubit_figure()
def _prepare_main_oscillation_figure(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_osc_off'],
'ylabel': self.proc_data_dict['ylabel_osc'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_osc_on'],
'ylabel': self.proc_data_dict['ylabel_osc'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['cos_fit_off'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_off']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ off',
'do_legend': True}
self.plot_dicts['cos_fit_on'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_on']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ on',
'do_legend': True}
# offset as a guide for the eye
y = self.fit_res['cos_fit_off'].params['offset'].value
            self.plot_dicts['cos_off_offset'] = {
                'plotfn': self.plot_matplot_ax_method,
                'ax_id': 'main',
'func': 'axhline',
'plot_kws': {
'y': y, 'color': 'C0', 'linestyle': 'dotted'}
}
phase_message = (
'Phase diff.: {:.1f} $\pm$ {:.1f} deg\n'
'Phase off: {:.1f} $\pm$ {:.1f}deg\n'
'Phase on: {:.1f} $\pm$ {:.1f}deg\n'
'Osc. amp. off: {:.4f} $\pm$ {:.4f}\n'
'Osc. amp. on: {:.4f} $\pm$ {:.4f}\n'
'Offs. diff.: {:.4f} $\pm$ {:.4f}\n'
'Osc. offs. off: {:.4f} $\pm$ {:.4f}\n'
'Osc. offs. on: {:.4f} $\pm$ {:.4f}'.format(
self.proc_data_dict['phi_cond'][0],
self.proc_data_dict['phi_cond'][1],
self.proc_data_dict['phi_0'][0],
self.proc_data_dict['phi_0'][1],
self.proc_data_dict['phi_1'][0],
self.proc_data_dict['phi_1'][1],
self.proc_data_dict['osc_amp_0'][0],
self.proc_data_dict['osc_amp_0'][1],
self.proc_data_dict['osc_amp_1'][0],
self.proc_data_dict['osc_amp_1'][1],
self.proc_data_dict['offs_diff'][0],
self.proc_data_dict['offs_diff'][1],
self.proc_data_dict['osc_offs_0'][0],
self.proc_data_dict['osc_offs_0'][1],
self.proc_data_dict['osc_offs_1'][0],
self.proc_data_dict['osc_offs_1'][1]))
self.plot_dicts['phase_message'] = {
'ax_id': 'main',
'ypos': 0.9,
'xpos': 1.45,
'plotfn': self.plot_text,
'box_props': 'fancy',
'line_kws': {'alpha': 0},
'text_string': phase_message}
def _prepare_spectator_qubit_figure(self):
self.plot_dicts['spectator_qubit'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_spec_off'],
'ylabel': self.proc_data_dict['ylabel_spec'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['spec_on'] = {
'plotfn': self.plot_line,
'ax_id': 'spectator_qubit',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_spec_on'],
'ylabel': self.proc_data_dict['ylabel_spec'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
leak_msg = (
'Missing fraction: {:.2f} % '.format(
self.proc_data_dict['missing_fraction']*100))
self.plot_dicts['leak_msg'] = {
'ax_id': 'spectator_qubit',
'ypos': 0.7,
'plotfn': self.plot_text,
'box_props': 'fancy',
'line_kws': {'alpha': 0},
'text_string': leak_msg}
# offset as a guide for the eye
y = self.fit_res['cos_fit_on'].params['offset'].value
            self.plot_dicts['cos_on_offset'] = {
                'plotfn': self.plot_matplot_ax_method,
                'ax_id': 'main',
'func': 'axhline',
'plot_kws': {
'y': y, 'color': 'C1', 'linestyle': 'dotted'}
}
class StateTomographyAnalysis(ba.BaseDataAnalysis):
"""
Analyses the results of the state tomography experiment and calculates
the corresponding quantum state.
Possible options that can be passed in the options_dict parameter:
cal_points: A data structure specifying the indices of the calibration
points. See the AveragedTimedomainAnalysis for format.
The calibration points need to be in the same order as the
used basis for the result.
data_type: 'averaged' or 'singleshot'. For singleshot data each
measurement outcome is saved and arbitrary order correlations
between the states can be calculated.
meas_operators: (optional) A list of qutip operators or numpy 2d arrays.
This overrides the measurement operators otherwise
found from the calibration points.
covar_matrix: (optional) The covariance matrix of the measurement
operators as a 2d numpy array. Overrides the one found
from the calibration points.
use_covariance_matrix (bool): Flag to define whether to use the
covariance matrix
basis_rots_str: A list of standard PycQED pulse names that were
applied to qubits before measurement
basis_rots: As an alternative to single_qubit_pulses, the basis
rotations applied to the system as qutip operators or numpy
matrices can be given.
mle: True/False, whether to do maximum likelihood fit. If False, only
least squares fit will be done, which could give negative
eigenvalues for the density matrix.
imle: True/False, whether to do iterative maximum likelihood fit. If
True, it takes preference over maximum likelihood method. Otherwise
least squares fit will be done, then 'mle' option will be checked.
pauli_raw: True/False, extracts Pauli expected values from a measurement
without assignment correction based on calibration data. If True,
takes preference over other methods except pauli_corr.
pauli_values: True/False, extracts Pauli expected values from a
measurement with assignment correction based on calibration data.
If True, takes preference over other methods.
iterations (optional): maximum number of iterations allowed in imle.
Tomographies with more qubits require more iterations to converge.
tolerance (optional): minimum change across iterations allowed in imle.
The iteration will stop if it goes under this value. Tomographies
with more qubits require smaller tolerance to converge.
rho_target (optional): A qutip density matrix that the result will be
compared to when calculating fidelity.
"""
def __init__(self, *args, **kwargs):
auto = kwargs.pop('auto', True)
super().__init__(*args, **kwargs)
kwargs['auto'] = auto
self.single_timestamp = True
self.params_dict = {'exp_metadata': 'exp_metadata'}
self.numeric_params = []
self.data_type = self.options_dict['data_type']
if self.data_type == 'averaged':
self.base_analysis = AveragedTimedomainAnalysis(*args, **kwargs)
elif self.data_type == 'singleshot':
self.base_analysis = roa.MultiQubit_SingleShot_Analysis(
*args, **kwargs)
else:
raise KeyError("Invalid tomography data mode: '" + self.data_type +
"'. Valid modes are 'averaged' and 'singleshot'.")
if kwargs.get('auto', True):
self.run_analysis()
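    # Illustrative usage sketch (hedged): the timestamp, target state and
    # option values below are made up for illustration; only the option names
    # are taken from the class docstring above.
    #
    #     import qutip as qtp
    #     bell = (qtp.ket('00') + qtp.ket('11')).unit()
    #     tomo_ana = StateTomographyAnalysis(
    #         t_start='20190101_120000',
    #         options_dict=dict(data_type='averaged',
    #                           basis_rots_str=('I', 'X90', 'Y90'),
    #                           mle=True,
    #                           rho_target=bell * bell.dag()))
    #     rho = tomo_ana.proc_data_dict['rho']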
def process_data(self):
tomography_qubits = self.options_dict.get('tomography_qubits', None)
data, Fs, Omega = self.base_analysis.measurement_operators_and_results(
tomography_qubits)
if 'data_filter' in self.options_dict:
data = self.options_dict['data_filter'](data.T).T
data = data.T
for i, v in enumerate(data):
data[i] = v / v.sum()
data = data.T
Fs = self.options_dict.get('meas_operators', Fs)
Fs = [qtp.Qobj(F) for F in Fs]
d = Fs[0].shape[0]
self.proc_data_dict['d'] = d
Omega = self.options_dict.get('covar_matrix', Omega)
if Omega is None:
Omega = np.diag(np.ones(len(Fs)))
elif len(Omega.shape) == 1:
Omega = np.diag(Omega)
metadata = self.raw_data_dict.get('exp_metadata',
self.options_dict.get(
'exp_metadata', {}))
if metadata is None:
metadata = {}
self.raw_data_dict['exp_metadata'] = metadata
basis_rots_str = metadata.get('basis_rots_str', None)
basis_rots_str = self.options_dict.get('basis_rots_str', basis_rots_str)
if basis_rots_str is not None:
nr_qubits = int(np.round(np.log2(d)))
pulse_list = list(itertools.product(basis_rots_str,
repeat=nr_qubits))
rotations = tomo.standard_qubit_pulses_to_rotations(pulse_list)
else:
rotations = metadata.get('basis_rots', None)
rotations = self.options_dict.get('basis_rots', rotations)
if rotations is None:
raise KeyError("Either 'basis_rots_str' or 'basis_rots' "
"parameter must be passed in the options "
"dictionary or in the experimental metadata.")
rotations = [qtp.Qobj(U) for U in rotations]
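        # Combine every basis rotation with every base measurement operator
        # and flatten the result, so that the stacked measurement results and
        # the block-diagonal covariance matrix below (one Omega block per
        # rotation setting) are ordered consistently.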
all_Fs = tomo.rotated_measurement_operators(rotations, Fs)
        all_Fs = list(itertools.chain(*np.array(all_Fs, dtype=object).T))
all_mus = np.array(list(itertools.chain(*data.T)))
all_Omegas = sp.linalg.block_diag(*[Omega] * len(data[0]))
self.proc_data_dict['meas_operators'] = all_Fs
self.proc_data_dict['covar_matrix'] = all_Omegas
self.proc_data_dict['meas_results'] = all_mus
if self.options_dict.get('pauli_values', False):
rho_pauli = tomo.pauli_values_tomography(all_mus,Fs,basis_rots_str)
self.proc_data_dict['rho_raw'] = rho_pauli
self.proc_data_dict['rho'] = rho_pauli
elif self.options_dict.get('pauli_raw', False):
pauli_raw = self.generate_raw_pauli_set()
rho_raw = tomo.pauli_set_to_density_matrix(pauli_raw)
self.proc_data_dict['rho_raw'] = rho_raw
self.proc_data_dict['rho'] = rho_raw
elif self.options_dict.get('imle', False):
it = metadata.get('iterations', None)
it = self.options_dict.get('iterations', it)
tol = metadata.get('tolerance', None)
tol = self.options_dict.get('tolerance', tol)
rho_imle = tomo.imle_tomography(
all_mus, all_Fs, it, tol)
self.proc_data_dict['rho_imle'] = rho_imle
self.proc_data_dict['rho'] = rho_imle
else:
rho_ls = tomo.least_squares_tomography(
all_mus, all_Fs,
all_Omegas if self.get_param_value('use_covariance_matrix', False)
                else None)
self.proc_data_dict['rho_ls'] = rho_ls
self.proc_data_dict['rho'] = rho_ls
if self.options_dict.get('mle', False):
rho_mle = tomo.mle_tomography(
all_mus, all_Fs,
all_Omegas if self.get_param_value('use_covariance_matrix', False) else None,
rho_guess=rho_ls)
self.proc_data_dict['rho_mle'] = rho_mle
self.proc_data_dict['rho'] = rho_mle
rho = self.proc_data_dict['rho']
self.proc_data_dict['purity'] = (rho * rho).tr().real
rho_target = metadata.get('rho_target', None)
rho_target = self.options_dict.get('rho_target', rho_target)
if rho_target is not None:
self.proc_data_dict['fidelity'] = tomo.fidelity(rho, rho_target)
if d == 4:
self.proc_data_dict['concurrence'] = tomo.concurrence(rho)
else:
self.proc_data_dict['concurrence'] = 0
def prepare_plots(self):
self.prepare_density_matrix_plot()
d = self.proc_data_dict['d']
if 2 ** (d.bit_length() - 1) == d:
# dimension is power of two, plot expectation values of pauli
# operators
self.prepare_pauli_basis_plot()
def prepare_density_matrix_plot(self):
self.tight_fig = self.options_dict.get('tight_fig', False)
rho_target = self.raw_data_dict['exp_metadata'].get('rho_target', None)
rho_target = self.options_dict.get('rho_target', rho_target)
d = self.proc_data_dict['d']
xtick_labels = self.options_dict.get('rho_ticklabels', None)
ytick_labels = self.options_dict.get('rho_ticklabels', None)
if 2 ** (d.bit_length() - 1) == d:
nr_qubits = d.bit_length() - 1
fmt_string = '{{:0{}b}}'.format(nr_qubits)
labels = [fmt_string.format(i) for i in range(2 ** nr_qubits)]
if xtick_labels is None:
xtick_labels = ['$|' + lbl + r'\rangle$' for lbl in labels]
if ytick_labels is None:
ytick_labels = [r'$\langle' + lbl + '|$' for lbl in labels]
color = (0.5 * np.angle(self.proc_data_dict['rho'].full()) / np.pi) % 1.
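        # Map the phase of each density-matrix element from (-pi, pi] onto
        # [0, 1) so that it can be used as an index into the cyclic phase
        # colormap (one full 2*pi period).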
cmap = self.options_dict.get('rho_colormap', self.default_phase_cmap())
if self.options_dict.get('pauli_raw', False):
title = 'Density matrix reconstructed from the Pauli (raw) set\n'
elif self.options_dict.get('pauli_values', False):
title = 'Density matrix reconstructed from the Pauli set\n'
elif self.options_dict.get('mle', False):
title = 'Maximum likelihood fit of the density matrix\n'
        elif self.options_dict.get('imle', False):
title = 'Iterative maximum likelihood fit of the density matrix\n'
else:
title = 'Least squares fit of the density matrix\n'
empty_artist = mpl.patches.Rectangle((0, 0), 0, 0, visible=False)
legend_entries = [(empty_artist,
r'Purity, $Tr(\rho^2) = {:.1f}\%$'.format(
100 * self.proc_data_dict['purity']))]
if rho_target is not None:
legend_entries += [
(empty_artist, r'Fidelity, $F = {:.1f}\%$'.format(
100 * self.proc_data_dict['fidelity']))]
if d == 4:
legend_entries += [
(empty_artist, r'Concurrence, $C = {:.2f}$'.format(
self.proc_data_dict['concurrence']))]
meas_string = self.base_analysis.\
raw_data_dict['measurementstring']
if isinstance(meas_string, list):
if len(meas_string) > 1:
meas_string = meas_string[0] + ' to ' + meas_string[-1]
else:
meas_string = meas_string[0]
self.plot_dicts['density_matrix'] = {
'plotfn': self.plot_bar3D,
'3d': True,
'3d_azim': -35,
'3d_elev': 35,
'xvals': np.arange(d),
'yvals': np.arange(d),
'zvals': np.abs(self.proc_data_dict['rho'].full()),
'zrange': (0, 1),
'color': color,
'colormap': cmap,
'bar_widthx': 0.5,
'bar_widthy': 0.5,
'xtick_loc': np.arange(d),
'xtick_labels': xtick_labels,
'ytick_loc': np.arange(d),
'ytick_labels': ytick_labels,
'ctick_loc': np.linspace(0, 1, 5),
'ctick_labels': ['$0$', r'$\frac{1}{2}\pi$', r'$\pi$',
r'$\frac{3}{2}\pi$', r'$2\pi$'],
'clabel': 'Phase (rad)',
'title': (title + self.raw_data_dict['timestamp'] + ' ' +
meas_string),
'do_legend': True,
'legend_entries': legend_entries,
'legend_kws': dict(loc='upper left', bbox_to_anchor=(0, 0.94))
}
if rho_target is not None:
rho_target = qtp.Qobj(rho_target)
if rho_target.type == 'ket':
rho_target = rho_target * rho_target.dag()
elif rho_target.type == 'bra':
rho_target = rho_target.dag() * rho_target
self.plot_dicts['density_matrix_target'] = {
'plotfn': self.plot_bar3D,
'3d': True,
'3d_azim': -35,
'3d_elev': 35,
'xvals': np.arange(d),
'yvals': np.arange(d),
'zvals': np.abs(rho_target.full()),
'zrange': (0, 1),
'color': (0.5 * np.angle(rho_target.full()) / np.pi) % 1.,
'colormap': cmap,
'bar_widthx': 0.5,
'bar_widthy': 0.5,
'xtick_loc': np.arange(d),
'xtick_labels': xtick_labels,
'ytick_loc': np.arange(d),
'ytick_labels': ytick_labels,
'ctick_loc': np.linspace(0, 1, 5),
'ctick_labels': ['$0$', r'$\frac{1}{2}\pi$', r'$\pi$',
r'$\frac{3}{2}\pi$', r'$2\pi$'],
'clabel': 'Phase (rad)',
'title': ('Target density matrix\n' +
self.raw_data_dict['timestamp'] + ' ' +
meas_string),
'bar_kws': dict(zorder=1),
}
def generate_raw_pauli_set(self):
nr_qubits = self.proc_data_dict['d'].bit_length() - 1
pauli_raw_values = []
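        # For each Pauli operator, combine the measurement results whose
        # measurement operators have a non-zero overlap with it, with the sign
        # of Tr(F * P); operators with (near-)zero overlap are skipped, and
        # the 2**nr_qubits prefactor rescales the average to a Pauli
        # expectation value.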
for op in tomo.generate_pauli_set(nr_qubits)[1]:
nr_terms = 0
sum_terms = 0.
for meas_op, meas_res in zip(self.proc_data_dict['meas_operators'],
self.proc_data_dict['meas_results']):
trace = (meas_op*op).tr().real
clss = int(trace*2)
if clss < 0:
sum_terms -= meas_res
nr_terms += 1
elif clss > 0:
sum_terms += meas_res
nr_terms += 1
pauli_raw_values.append(2**nr_qubits*sum_terms/nr_terms)
return pauli_raw_values
def generate_corr_pauli_set(self,Fs,rotations):
nr_qubits = self.proc_data_dict['d'].bit_length() - 1
Fs_corr = []
assign_corr = []
for i,F in enumerate(Fs):
new_op = np.zeros(2**nr_qubits)
new_op[i] = 1
Fs_corr.append(qtp.Qobj(np.diag(new_op)))
assign_corr.append(np.diag(F.full()))
pauli_Fs = tomo.rotated_measurement_operators(rotations, Fs_corr)
        pauli_Fs = list(itertools.chain(*np.array(pauli_Fs, dtype=object).T))
mus = self.proc_data_dict['meas_results']
pauli_mus = np.reshape(mus,[-1,2**nr_qubits])
for i,raw_mus in enumerate(pauli_mus):
pauli_mus[i] = np.matmul(np.linalg.inv(assign_corr),np.array(raw_mus))
pauli_mus = pauli_mus.flatten()
pauli_values = []
for op in tomo.generate_pauli_set(nr_qubits)[1]:
nr_terms = 0
sum_terms = 0.
for meas_op, meas_res in zip(pauli_Fs,pauli_mus):
trace = (meas_op*op).tr().real
clss = int(trace*2)
if clss < 0:
sum_terms -= meas_res
nr_terms += 1
elif clss > 0:
sum_terms += meas_res
nr_terms += 1
pauli_values.append(2**nr_qubits*sum_terms/nr_terms)
return pauli_values
def prepare_pauli_basis_plot(self):
yexp = tomo.density_matrix_to_pauli_basis(self.proc_data_dict['rho'])
nr_qubits = self.proc_data_dict['d'].bit_length() - 1
labels = list(itertools.product(*[['I', 'X', 'Y', 'Z']]*nr_qubits))
labels = [''.join(label_list) for label_list in labels]
if nr_qubits == 1:
order = [1, 2, 3]
elif nr_qubits == 2:
order = [1, 2, 3, 4, 8, 12, 5, 6, 7, 9, 10, 11, 13, 14, 15]
elif nr_qubits == 3:
order = [1, 2, 3, 4, 8, 12, 16, 32, 48] + \
[5, 6, 7, 9, 10, 11, 13, 14, 15] + \
[17, 18, 19, 33, 34, 35, 49, 50, 51] + \
[20, 24, 28, 36, 40, 44, 52, 56, 60] + \
[21, 22, 23, 25, 26, 27, 29, 30, 31] + \
[37, 38, 39, 41, 42, 43, 45, 46, 47] + \
[53, 54, 55, 57, 58, 59, 61, 62, 63]
else:
order = np.arange(4**nr_qubits)[1:]
if self.options_dict.get('pauli_raw', False):
fit_type = 'raw counts'
elif self.options_dict.get('pauli_values', False):
fit_type = 'corrected counts'
elif self.options_dict.get('mle', False):
fit_type = 'maximum likelihood estimation'
elif self.options_dict.get('imle', False):
fit_type = 'iterative maximum likelihood estimation'
else:
fit_type = 'least squares fit'
meas_string = self.base_analysis. \
raw_data_dict['measurementstring']
if np.ndim(meas_string) > 0:
if len(meas_string) > 1:
meas_string = meas_string[0] + ' to ' + meas_string[-1]
else:
meas_string = meas_string[0]
self.plot_dicts['pauli_basis'] = {
'plotfn': self.plot_bar,
'xcenters': np.arange(len(order)),
'xwidth': 0.4,
'xrange': (-1, len(order)),
'yvals': np.array(yexp)[order],
'xlabel': r'Pauli operator, $\hat{O}$',
'ylabel': r'Expectation value, $\mathrm{Tr}(\hat{O} \hat{\rho})$',
'title': 'Pauli operators, ' + fit_type + '\n' +
self.raw_data_dict['timestamp'] + ' ' + meas_string,
'yrange': (-1.1, 1.1),
'xtick_loc': np.arange(4**nr_qubits - 1),
'xtick_rotation': 90,
'xtick_labels': np.array(labels)[order],
'bar_kws': dict(zorder=10),
'setlabel': 'Fit to experiment',
'do_legend': True
}
if nr_qubits > 2:
self.plot_dicts['pauli_basis']['plotsize'] = (10, 5)
rho_target = self.raw_data_dict['exp_metadata'].get('rho_target', None)
rho_target = self.options_dict.get('rho_target', rho_target)
if rho_target is not None:
rho_target = qtp.Qobj(rho_target)
ytar = tomo.density_matrix_to_pauli_basis(rho_target)
self.plot_dicts['pauli_basis_target'] = {
'plotfn': self.plot_bar,
'ax_id': 'pauli_basis',
'xcenters': np.arange(len(order)),
'xwidth': 0.8,
'yvals': np.array(ytar)[order],
'xtick_loc': np.arange(len(order)),
'xtick_labels': np.array(labels)[order],
'bar_kws': dict(color='0.8', zorder=0),
'setlabel': 'Target values',
'do_legend': True
}
purity_str = r'Purity, $Tr(\rho^2) = {:.1f}\%$'.format(
100 * self.proc_data_dict['purity'])
if rho_target is not None:
fidelity_str = '\n' + r'Fidelity, $F = {:.1f}\%$'.format(
100 * self.proc_data_dict['fidelity'])
else:
fidelity_str = ''
if self.proc_data_dict['d'] == 4:
concurrence_str = '\n' + r'Concurrence, $C = {:.1f}\%$'.format(
100 * self.proc_data_dict['concurrence'])
else:
concurrence_str = ''
self.plot_dicts['pauli_info_labels'] = {
'ax_id': 'pauli_basis',
'plotfn': self.plot_line,
'xvals': [0],
'yvals': [0],
'line_kws': {'alpha': 0},
'setlabel': purity_str + fidelity_str,
'do_legend': True
}
def default_phase_cmap(self):
cols = np.array(((41, 39, 231), (61, 130, 163), (208, 170, 39),
(209, 126, 4), (181, 28, 20), (238, 76, 152),
(251, 130, 242), (162, 112, 251))) / 255
n = len(cols)
cdict = {
'red': [[i/n, cols[i%n][0], cols[i%n][0]] for i in range(n+1)],
'green': [[i/n, cols[i%n][1], cols[i%n][1]] for i in range(n+1)],
'blue': [[i/n, cols[i%n][2], cols[i%n][2]] for i in range(n+1)],
}
return mpl.colors.LinearSegmentedColormap('DMDefault', cdict)
class ReadoutROPhotonsAnalysis(Single_Qubit_TimeDomainAnalysis):
"""
    Analyses the residual photon number in the readout resonator based on the
    readout_photons_in_resonator function.
    Function-specific options for the options dict:
        kappa_effective (required, one value per qubit state)
        chi (required)
        T2echo (required)
        artif_detuning (optional, defaults to 0)
        print_fit_results (optional)
"""
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
close_figs: bool=False, options_dict: dict=None,
extract_only: bool=False, do_fitting: bool=False,
auto: bool=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
close_figs=close_figs, label=label,
extract_only=extract_only, do_fitting=do_fitting)
if self.options_dict.get('TwoD', None) is None:
self.options_dict['TwoD'] = True
self.label = label
self.params_dict = {
'measurementstring': 'measurementstring',
'sweep_points': 'sweep_points',
'sweep_points_2D': 'sweep_points_2D',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = self.options_dict.get('numeric_params',
OrderedDict())
self.kappa = self.options_dict.get('kappa_effective', None)
self.chi = self.options_dict.get('chi', None)
self.T2 = self.options_dict.get('T2echo', None)
self.artif_detuning = self.options_dict.get('artif_detuning', 0)
if (self.kappa is None) or (self.chi is None) or (self.T2 is None):
raise ValueError('kappa_effective, chi and T2echo must be passed to '
'the options_dict.')
if auto:
self.run_analysis()
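    # Illustrative usage sketch (hedged): the timestamp and the numerical
    # values below are made up; kappa_effective, chi and T2echo are the
    # options required by __init__ above.
    #
    #     ro_photon_ana = ReadoutROPhotonsAnalysis(
    #         t_start='20190101_120000',
    #         options_dict=dict(kappa_effective=[4e6, 5e6],
    #                           chi=0.5e6,
    #                           T2echo=10e-6,
    #                           artif_detuning=2e6))
    #     n_res = ro_photon_ana.proc_data_dict['photon_number']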
def process_data(self):
self.proc_data_dict = OrderedDict()
self.proc_data_dict['qubit_state'] = [[],[]]
self.proc_data_dict['delay_to_relax'] = self.raw_data_dict[
'sweep_points_2D'][0]
self.proc_data_dict['ramsey_times'] = []
for i,x in enumerate(np.transpose(self.raw_data_dict[
'measured_data']['raw w0 _measure'][0])):
self.proc_data_dict['qubit_state'][0].append([])
self.proc_data_dict['qubit_state'][1].append([])
for j,y in enumerate(np.transpose(self.raw_data_dict[
'measured_data']['raw w0 _measure'][0])[i]):
if j%2 == 0:
self.proc_data_dict['qubit_state'][0][i].append(y)
else:
self.proc_data_dict['qubit_state'][1][i].append(y)
        for i, x in enumerate(self.raw_data_dict['sweep_points'][0]):
if i % 2 == 0:
self.proc_data_dict['ramsey_times'].append(x)
        # TODO: still need to pass chi
def prepare_fitting(self):
self.proc_data_dict['photon_number'] = [[],[]]
self.proc_data_dict['fit_results'] = []
self.proc_data_dict['ramsey_fit_results'] = [[],[]]
for i,tau in enumerate(self.proc_data_dict['delay_to_relax']):
self.proc_data_dict['ramsey_fit_results'][0].append(self.fit_Ramsey(
self.proc_data_dict['ramsey_times'][:-4],
self.proc_data_dict['qubit_state'][0][i][:-4]/
max(self.proc_data_dict['qubit_state'][0][i][:-4]),
state=0,
kw=self.options_dict))
self.proc_data_dict['ramsey_fit_results'][1].append(self.fit_Ramsey(
self.proc_data_dict['ramsey_times'][:-4],
self.proc_data_dict['qubit_state'][1][i][:-4]/
max(self.proc_data_dict['qubit_state'][1][i][:-4]),
state=1,
kw=self.options_dict))
n01 = self.proc_data_dict['ramsey_fit_results'
][0][i][0].params['n0'].value
n02 = self.proc_data_dict['ramsey_fit_results'
][1][i][0].params['n0'].value
self.proc_data_dict['photon_number'][0].append(n01)
self.proc_data_dict['photon_number'][1].append(n02)
def run_fitting(self):
        print_fit_results = self.options_dict.get('print_fit_results', False)
exp_dec_mod = lmfit.Model(fit_mods.ExpDecayFunc)
exp_dec_mod.set_param_hint('n',
value=1,
vary=False)
exp_dec_mod.set_param_hint('offset',
value=0,
min=0,
vary=True)
exp_dec_mod.set_param_hint('tau',
value=self.proc_data_dict[
'delay_to_relax'][-1],
min=1e-11,
vary=True)
exp_dec_mod.set_param_hint('amplitude',
value=1,
min=0,
vary=True)
params = exp_dec_mod.make_params()
self.fit_res = OrderedDict()
self.fit_res['ground_state'] = exp_dec_mod.fit(
data=self.proc_data_dict['photon_number'][0],
params=params,
t=self.proc_data_dict['delay_to_relax'])
self.fit_res['excited_state'] = exp_dec_mod.fit(
data=self.proc_data_dict['photon_number'][1],
params=params,
t=self.proc_data_dict['delay_to_relax'])
if print_fit_results:
print(self.fit_res['ground_state'].fit_report())
print(self.fit_res['excited_state'].fit_report())
def fit_Ramsey(self, x, y, state, **kw):
x = np.array(x)
y = np.array(y)
exp_dec_p_mod = lmfit.Model(fit_mods.ExpDecayPmod)
comb_exp_dec_mod = lmfit.Model(fit_mods.CombinedOszExpDecayFunc)
average = np.mean(y)
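        # Initial guesses: the dominant non-DC FFT component gives the Ramsey
        # oscillation frequency, and the residual photon number follows from
        # the dispersive shift of 2*chi per photon on top of the artificial
        # detuning, i.e. freq_est ~ artif_detuning + 2*chi*n_est.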
ft_of_data = np.fft.fft(y)
index_of_fourier_maximum = np.argmax(np.abs(
ft_of_data[1:len(ft_of_data) // 2])) + 1
max_ramsey_delay = x[-1] - x[0]
fft_axis_scaling = 1 / max_ramsey_delay
freq_est = fft_axis_scaling * index_of_fourier_maximum
n_est = (freq_est-self.artif_detuning)/(2 * self.chi)
exp_dec_p_mod.set_param_hint('T2echo',
value=self.T2,
vary=False)
exp_dec_p_mod.set_param_hint('offset',
value=average,
min=0,
vary=True)
exp_dec_p_mod.set_param_hint('delta',
value=self.artif_detuning,
vary=False)
exp_dec_p_mod.set_param_hint('amplitude',
value=1,
min=0,
vary=True)
exp_dec_p_mod.set_param_hint('kappa',
value=self.kappa[state],
vary=False)
exp_dec_p_mod.set_param_hint('chi',
value=self.chi,
vary=False)
exp_dec_p_mod.set_param_hint('n0',
value=n_est,
min=0,
vary=True)
exp_dec_p_mod.set_param_hint('phase',
value=0,
vary=True)
comb_exp_dec_mod.set_param_hint('tau',
value=self.T2,
vary=True)
comb_exp_dec_mod.set_param_hint('offset',
value=average,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('oscillation_offset',
value=average,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('amplitude',
value=1,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('tau_gauss',
value=self.kappa[state],
vary=True)
comb_exp_dec_mod.set_param_hint('n0',
value=n_est,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('phase',
value=0,
vary=True)
comb_exp_dec_mod.set_param_hint('delta',
value=self.artif_detuning,
vary=False)
comb_exp_dec_mod.set_param_hint('chi',
value=self.chi,
vary=False)
if (np.average(y[:4]) >
np.average(y[4:8])):
phase_estimate = 0
else:
phase_estimate = np.pi
exp_dec_p_mod.set_param_hint('phase',
value=phase_estimate, vary=True)
comb_exp_dec_mod.set_param_hint('phase',
value=phase_estimate, vary=True)
amplitude_guess = 0.5
if np.all(np.logical_and(y >= 0, y <= 1)):
exp_dec_p_mod.set_param_hint('amplitude',
value=amplitude_guess,
min=0.00,
max=4.0,
vary=True)
comb_exp_dec_mod.set_param_hint('amplitude',
value=amplitude_guess,
min=0.00,
max=4.0,
vary=True)
else:
            log.warning('data is not normalized, varying amplitude')
exp_dec_p_mod.set_param_hint('amplitude',
value=max(y),
min=0.00,
max=4.0,
vary=True)
comb_exp_dec_mod.set_param_hint('amplitude',
value=max(y),
min=0.00,
max=4.0,
vary=True)
fit_res_1 = exp_dec_p_mod.fit(data=y,
t=x,
params= exp_dec_p_mod.make_params())
fit_res_2 = comb_exp_dec_mod.fit(data=y,
t=x,
params= comb_exp_dec_mod.make_params())
if fit_res_1.chisqr > .35:
log.warning('Fit did not converge, varying phase')
fit_res_lst = []
for phase_estimate in np.linspace(0, 2*np.pi, 10):
for i, del_amp in enumerate(np.linspace(
-max(y)/10, max(y)/10, 10)):
exp_dec_p_mod.set_param_hint('phase',
value=phase_estimate,
vary=False)
exp_dec_p_mod.set_param_hint('amplitude',
value=max(y)+ del_amp)
fit_res_lst += [exp_dec_p_mod.fit(
data=y,
t=x,
params= exp_dec_p_mod.make_params())]
            chisqr_lst = [fr.chisqr for fr in fit_res_lst]
fit_res_1 = fit_res_lst[np.argmin(chisqr_lst)]
if fit_res_2.chisqr > .35:
log.warning('Fit did not converge, varying phase')
fit_res_lst = []
for phase_estimate in np.linspace(0, 2*np.pi, 10):
for i, del_amp in enumerate(np.linspace(
-max(y)/10, max(y)/10, 10)):
comb_exp_dec_mod.set_param_hint('phase',
value=phase_estimate,
vary=False)
comb_exp_dec_mod.set_param_hint('amplitude',
value=max(y)+ del_amp)
fit_res_lst += [comb_exp_dec_mod.fit(
data=y,
t=x,
params= comb_exp_dec_mod.make_params())]
            chisqr_lst = [fr.chisqr for fr in fit_res_lst]
fit_res_2 = fit_res_lst[np.argmin(chisqr_lst)]
if fit_res_1.chisqr < fit_res_2.chisqr:
self.proc_data_dict['params'] = exp_dec_p_mod.make_params()
return [fit_res_1,fit_res_1,fit_res_2]
else:
self.proc_data_dict['params'] = comb_exp_dec_mod.make_params()
return [fit_res_2,fit_res_1,fit_res_2]
def prepare_plots(self):
self.prepare_2D_sweep_plot()
self.prepare_photon_number_plot()
self.prepare_ramsey_plots()
def prepare_2D_sweep_plot(self):
self.plot_dicts['off_full_data_'+self.label] = {
'title': 'Raw data |g>',
'plotfn': self.plot_colorxy,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': self.proc_data_dict['delay_to_relax'],
'ylabel': 'Delay after first RO-pulse',
'yunit': 's',
'zvals': np.array(self.proc_data_dict['qubit_state'][0]) }
self.plot_dicts['on_full_data_'+self.label] = {
'title': 'Raw data |e>',
'plotfn': self.plot_colorxy,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': self.proc_data_dict['delay_to_relax'],
'ylabel': 'Delay after first RO-pulse',
'yunit': 's',
'zvals': np.array(self.proc_data_dict['qubit_state'][1]) }
def prepare_ramsey_plots(self):
x_fit = np.linspace(self.proc_data_dict['ramsey_times'][0],
max(self.proc_data_dict['ramsey_times']),101)
for i in range(len(self.proc_data_dict['ramsey_fit_results'][0])):
self.plot_dicts['off_'+str(i)] = {
'title': 'Ramsey w t_delay = '+\
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |g> state',
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': np.array(self.proc_data_dict['qubit_state'][0][i]/
max(self.proc_data_dict['qubit_state'][0][i][:-4])),
'ylabel': 'Measured qubit state',
'yunit': '',
'marker': 'o',
'setlabel': '|g> data_'+str(i),
'do_legend': True }
self.plot_dicts['off_fit_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |g> state',
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][0][i][1].eval(
self.proc_data_dict['ramsey_fit_results'][0][i][1].params,
t=x_fit),
'linestyle': '-',
'marker': '',
'setlabel': '|g> fit_model'+str(i),
'do_legend': True }
self.plot_dicts['off_fit_2_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |g> state',
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][0][i][2].eval(
self.proc_data_dict['ramsey_fit_results'][0][i][2].params,
t=x_fit),
'linestyle': '-',
'marker': '',
                'setlabel': '|g> fit_simple_model'+str(i),
'do_legend': True }
self.plot_dicts['hidden_g_'+str(i)] = {
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': [0],
'yvals': [0],
'color': 'w',
'setlabel': 'Residual photon count = '
''+str(self.proc_data_dict['photon_number'][0][i]),
'do_legend': True }
self.plot_dicts['on_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |e> state',
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': np.array(self.proc_data_dict['qubit_state'][1][i]/
max(self.proc_data_dict['qubit_state'][1][i][:-4])),
'ylabel': 'Measured qubit state',
'yunit': '',
'marker': 'o',
'setlabel': '|e> data_'+str(i),
'do_legend': True }
self.plot_dicts['on_fit_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |e> state',
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][1][i][1].eval(
self.proc_data_dict['ramsey_fit_results'][1][i][1].params,
t=x_fit),
'linestyle': '-',
'marker': '',
'setlabel': '|e> fit_model'+str(i),
'do_legend': True }
self.plot_dicts['on_fit_2_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |e> state',
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][1][i][2].eval(
self.proc_data_dict['ramsey_fit_results'][1][i][2].params,
t=x_fit),
'linestyle': '-',
'marker': '',
                'setlabel': '|e> fit_simple_model'+str(i),
'do_legend': True }
self.plot_dicts['hidden_e_'+str(i)] = {
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': [0],
'yvals': [0],
'color': 'w',
'setlabel': 'Residual photon count = '
''+str(self.proc_data_dict['photon_number'][1][i]),
'do_legend': True }
def prepare_photon_number_plot(self):
ylabel = 'Average photon number'
yunit = ''
x_fit = np.linspace(min(self.proc_data_dict['delay_to_relax']),
max(self.proc_data_dict['delay_to_relax']),101)
minmax_data = [min(min(self.proc_data_dict['photon_number'][0]),
min(self.proc_data_dict['photon_number'][1])),
max(max(self.proc_data_dict['photon_number'][0]),
max(self.proc_data_dict['photon_number'][1]))]
minmax_data[0] -= minmax_data[0]/5
minmax_data[1] += minmax_data[1]/5
self.plot_dicts['Photon number count'] = {
'plotfn': self.plot_line,
'xlabel': 'Delay after first RO-pulse',
'ax_id': 'Photon number count ',
'xunit': 's',
'xvals': self.proc_data_dict['delay_to_relax'],
'yvals': self.proc_data_dict['photon_number'][0],
'ylabel': ylabel,
'yunit': yunit,
'yrange': minmax_data,
'title': 'Residual photon number',
'color': 'b',
'linestyle': '',
'marker': 'o',
'setlabel': '|g> data',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['main2'] = {
'plotfn': self.plot_line,
'xunit': 's',
'xvals': x_fit,
'yvals': self.fit_res['ground_state'].eval(
self.fit_res['ground_state'].params,
t=x_fit),
'yrange': minmax_data,
'ax_id': 'Photon number count ',
'color': 'b',
'linestyle': '-',
'marker': '',
'setlabel': '|g> fit',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['main3'] = {
'plotfn': self.plot_line,
'xunit': 's',
'xvals': self.proc_data_dict['delay_to_relax'],
'yvals': self.proc_data_dict['photon_number'][1],
'yrange': minmax_data,
'ax_id': 'Photon number count ',
'color': 'r',
'linestyle': '',
'marker': 'o',
'setlabel': '|e> data',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['main4'] = {
'plotfn': self.plot_line,
'xunit': 's',
'ax_id': 'Photon number count ',
'xvals': x_fit,
'yvals': self.fit_res['excited_state'].eval(
self.fit_res['excited_state'].params,
t=x_fit),
'yrange': minmax_data,
'ylabel': ylabel,
'color': 'r',
'linestyle': '-',
'marker': '',
'setlabel': '|e> fit',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['hidden_1'] = {
'ax_id': 'Photon number count ',
'plotfn': self.plot_line,
'yrange': minmax_data,
'xvals': [0],
'yvals': [0],
'color': 'w',
            'setlabel': 'tau_g = {:.3f} ns'.format(
                self.fit_res['ground_state'].params['tau'].value*1e9),
'do_legend': True }
self.plot_dicts['hidden_2'] = {
'ax_id': 'Photon number count ',
'plotfn': self.plot_line,
'yrange': minmax_data,
'xvals': [0],
'yvals': [0],
'color': 'w',
            'setlabel': 'tau_e = {:.3f} ns'.format(
                self.fit_res['excited_state'].params['tau'].value*1e9),
'do_legend': True}
class RODynamicPhaseAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names: list=None, t_start: str=None, t_stop: str=None,
data_file_path: str=None, single_timestamp: bool=False,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(qb_names=qb_names, t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only,
do_fitting=do_fitting,
auto=False)
if auto:
self.run_analysis()
def process_data(self):
super().process_data()
if 'qbp_name' in self.metadata:
self.pulsed_qbname = self.metadata['qbp_name']
else:
self.pulsed_qbname = self.options_dict.get('pulsed_qbname')
self.measured_qubits = [qbn for qbn in self.channel_map if
qbn != self.pulsed_qbname]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.measured_qubits:
ro_dict = self.proc_data_dict['projected_data_dict'][qbn]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
for ro_suff, data in ro_dict.items():
cos_mod = lmfit.Model(fit_mods.CosFunc)
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
guess_pars = fit_mods.Cos_guess(
model=cos_mod,
t=sweep_points,
data=data)
guess_pars['amplitude'].vary = True
guess_pars['offset'].vary = True
guess_pars['frequency'].vary = True
guess_pars['phase'].vary = True
key = 'cos_fit_{}{}'.format(qbn, ro_suff)
self.fit_dicts[key] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.dynamic_phases = OrderedDict()
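        # The dynamic phase is the difference between the fitted cosine phases
        # with and without the intermediate measurement, converted from
        # radians to degrees.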
for meas_qbn in self.measured_qubits:
self.dynamic_phases[meas_qbn] = \
(self.fit_dicts['cos_fit_{}_measure'.format(meas_qbn)][
'fit_res'].best_values['phase'] -
self.fit_dicts['cos_fit_{}_ref_measure'.format(meas_qbn)][
'fit_res'].best_values['phase'])*180/np.pi
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
for meas_qbn in self.measured_qubits:
sweep_points_dict = self.proc_data_dict['sweep_points_dict'][
meas_qbn]
if self.num_cal_points != 0:
yvals = [self.proc_data_dict['projected_data_dict'][meas_qbn][
'_ref_measure'][:-self.num_cal_points],
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_measure'][:-self.num_cal_points]]
sweep_points = sweep_points_dict['msmt_sweep_points']
# plot cal points
for i, cal_pts_idxs in enumerate(
self.cal_states_dict.values()):
key = list(self.cal_states_dict)[i] + meas_qbn
self.plot_dicts[key] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'plotfn': self.plot_line,
'xvals': np.mean([
sweep_points_dict['cal_points_sweep_points'][
cal_pts_idxs],
sweep_points_dict['cal_points_sweep_points'][
cal_pts_idxs]],
axis=0),
'yvals': np.mean([
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_ref_measure'][cal_pts_idxs],
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_measure'][cal_pts_idxs]],
axis=0),
'setlabel': list(self.cal_states_dict)[i],
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'linestyle': 'none',
'line_kws': {'color': self.get_cal_state_color(
list(self.cal_states_dict)[i])}}
else:
yvals = [self.proc_data_dict['projected_data_dict'][meas_qbn][
'_ref_measure'],
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_measure']]
sweep_points = sweep_points_dict['sweep_points']
self.plot_dicts['dyn_phase_plot_' + meas_qbn] = {
'plotfn': self.plot_line,
'xvals': [sweep_points, sweep_points],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': yvals,
'ylabel': 'Excited state population',
'yunit': '',
'setlabel': ['with measurement', 'no measurement'],
'title': (self.raw_data_dict['timestamps'][0] + ' ' +
self.raw_data_dict['measurementstring'][0]),
'linestyle': 'none',
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
self.plot_dicts['cos_fit_' + meas_qbn + '_ref_measure'] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_{}_ref_measure'.format(
meas_qbn)]['fit_res'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
self.plot_dicts['cos_fit_' + meas_qbn + '_measure'] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_{}_measure'.format(
meas_qbn)]['fit_res'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
textstr = 'Dynamic phase = {:.2f}'.format(
self.dynamic_phases[meas_qbn]) + r'$^{\circ}$'
self.plot_dicts['text_msg_' + meas_qbn] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'ypos': -0.175,
'xpos': 0.5,
'horizontalalignment': 'center',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
class FluxAmplitudeSweepAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names, *args, **kwargs):
self.mask_freq = kwargs.pop('mask_freq', None)
self.mask_amp = kwargs.pop('mask_amp', None)
super().__init__(qb_names, *args, **kwargs)
def extract_data(self):
super().extract_data()
# Set some default values specific to FluxPulseScopeAnalysis if the
# respective options have not been set by the user or in the metadata.
# (We do not do this in the init since we have to wait until
# metadata has been extracted.)
if self.get_param_value('rotation_type', default_value=None) is None:
self.options_dict['rotation_type'] = 'global_PCA'
if self.get_param_value('TwoD', default_value=None) is None:
self.options_dict['TwoD'] = True
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
nr_sp = {qb: len(pdd['sweep_points_dict'][qb]['sweep_points'])
for qb in self.qb_names}
nr_sp2d = {qb: len(list(pdd['sweep_points_2D_dict'][qb].values())[0])
for qb in self.qb_names}
nr_cp = self.num_cal_points
# make matrix out of vector
data_reshaped = {qb: np.reshape(deepcopy(
pdd['data_to_fit'][qb]).T.flatten(), (nr_sp[qb], nr_sp2d[qb]))
for qb in self.qb_names}
pdd['data_reshaped'] = data_reshaped
# remove calibration points from data to fit
data_no_cp = {qb: np.array([pdd['data_reshaped'][qb][i, :]
for i in range(nr_sp[qb]-nr_cp)])
for qb in self.qb_names}
# apply mask
for qb in self.qb_names:
if self.mask_freq is None:
self.mask_freq = [True]*nr_sp2d[qb] # by default, no point is masked
if self.mask_amp is None:
self.mask_amp = [True]*(nr_sp[qb]-nr_cp)
pdd['freqs_masked'] = {}
pdd['amps_masked'] = {}
pdd['data_masked'] = {}
for qb in self.qb_names:
sp_param = [k for k in self.mospm[qb] if 'freq' in k][0]
pdd['freqs_masked'][qb] = \
pdd['sweep_points_2D_dict'][qb][sp_param][self.mask_freq]
pdd['amps_masked'][qb] = \
pdd['sweep_points_dict'][qb]['sweep_points'][
:-self.num_cal_points][self.mask_amp]
data_masked = data_no_cp[qb][self.mask_amp,:]
pdd['data_masked'][qb] = data_masked[:, self.mask_freq]
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
# Gaussian fit of amplitude slices
gauss_mod = fit_mods.GaussianModel_v2()
for qb in self.qb_names:
for i in range(len(pdd['amps_masked'][qb])):
data = pdd['data_masked'][qb][i,:]
self.fit_dicts[f'gauss_fit_{qb}_{i}'] = {
'model': gauss_mod,
'fit_xvals': {'x': pdd['freqs_masked'][qb]},
'fit_yvals': {'data': data}
}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['gauss_center'] = {}
pdd['gauss_center_err'] = {}
pdd['filtered_center'] = {}
pdd['filtered_amps'] = {}
for qb in self.qb_names:
pdd['gauss_center'][qb] = np.array([
self.fit_res[f'gauss_fit_{qb}_{i}'].best_values['center']
for i in range(len(pdd['amps_masked'][qb]))])
pdd['gauss_center_err'][qb] = np.array([
self.fit_res[f'gauss_fit_{qb}_{i}'].params['center'].stderr
for i in range(len(pdd['amps_masked'][qb]))])
# filter out points with stderr > 1e6 Hz
pdd['filtered_center'][qb] = np.array([])
pdd['filtered_amps'][qb] = np.array([])
for i, stderr in enumerate(pdd['gauss_center_err'][qb]):
try:
if stderr < 1e6:
pdd['filtered_center'][qb] = \
np.append(pdd['filtered_center'][qb],
pdd['gauss_center'][qb][i])
pdd['filtered_amps'][qb] = \
np.append(pdd['filtered_amps'][qb],
pdd['sweep_points_dict'][qb]\
['sweep_points'][:-self.num_cal_points][i])
                except TypeError:
continue
# if gaussian fitting does not work (i.e. all points were filtered
# out above) use max value of data to get an estimate of freq
if len(pdd['filtered_amps'][qb]) == 0:
for qb in self.qb_names:
freqs = np.array([])
for i in range(pdd['data_masked'][qb].shape[0]):
freqs = np.append(freqs, pdd['freqs_masked'][qb]\
[np.argmax(pdd['data_masked'][qb][i,:])])
pdd['filtered_center'][qb] = freqs
pdd['filtered_amps'][qb] = pdd['amps_masked'][qb]
# fit the freqs to the qubit model
self.fit_func = self.get_param_value('fit_func', fit_mods.Qubit_dac_to_freq)
if self.fit_func == fit_mods.Qubit_dac_to_freq_precise:
fit_guess_func = fit_mods.Qubit_dac_arch_guess_precise
else:
fit_guess_func = fit_mods.Qubit_dac_arch_guess
freq_mod = lmfit.Model(self.fit_func)
fixed_params = \
self.get_param_value("fixed_params_for_fit", {}).get(qb, None)
if fixed_params is None:
fixed_params = dict(E_c=0)
freq_mod.guess = fit_guess_func.__get__(
freq_mod, freq_mod.__class__)
self.fit_dicts[f'freq_fit_{qb}'] = {
'model': freq_mod,
'fit_xvals': {'dac_voltage': pdd['filtered_amps'][qb]},
'fit_yvals': {'data': pdd['filtered_center'][qb]},
"guessfn_pars": {"fixed_params": fixed_params}}
self.run_fitting()
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
sp_param = [k for k in self.mospm[qb] if 'freq' in k][0]
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'data_2d_{qb}',
'plotfn': self.plot_colorxy,
'xvals': pdd['sweep_points_dict'][qb]['sweep_points'],
'yvals': pdd['sweep_points_2D_dict'][qb][sp_param],
'zvals': np.transpose(pdd['data_reshaped'][qb]),
'xlabel': r'Flux pulse amplitude',
'xunit': 'V',
'ylabel': r'Qubit drive frequency',
'yunit': 'Hz',
'zlabel': 'Excited state population',
}
if self.do_fitting:
if self.options_dict.get('scatter', True):
label = f'freq_scatter_{qb}_scatter'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'data_2d_{qb}',
'plotfn': self.plot_line,
'linestyle': '',
'marker': 'o',
'xvals': pdd['filtered_amps'][qb],
'yvals': pdd['filtered_center'][qb],
'xlabel': r'Flux pulse amplitude',
'xunit': 'V',
'ylabel': r'Qubit drive frequency',
'yunit': 'Hz',
'color': 'white',
}
amps = pdd['sweep_points_dict'][qb]['sweep_points'][
:-self.num_cal_points]
label = f'freq_scatter_{qb}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'data_2d_{qb}',
'plotfn': self.plot_line,
'linestyle': '-',
'marker': '',
'xvals': amps,
'yvals': self.fit_func(amps,
**self.fit_res[f'freq_fit_{qb}'].best_values),
'color': 'red',
}
class T1FrequencySweepAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
nr_cp = self.num_cal_points
self.lengths = OrderedDict()
self.amps = OrderedDict()
self.freqs = OrderedDict()
for qbn in self.qb_names:
len_key = [pn for pn in self.mospm[qbn] if 'length' in pn]
if len(len_key) == 0:
                raise KeyError("Couldn't find sweep points corresponding to "
                               "flux pulse length.")
self.lengths[qbn] = self.sp.get_sweep_params_property(
'values', 0, len_key[0])
amp_key = [pn for pn in self.mospm[qbn] if 'amp' in pn]
            if len(amp_key) == 0:
                raise KeyError("Couldn't find sweep points corresponding to "
                               "flux pulse amplitude.")
self.amps[qbn] = self.sp.get_sweep_params_property(
'values', 1, amp_key[0])
freq_key = [pn for pn in self.mospm[qbn] if 'freq' in pn]
if len(freq_key) == 0:
self.freqs[qbn] = None
else:
self.freqs[qbn] =self.sp.get_sweep_params_property(
'values', 1, freq_key[0])
nr_amps = len(self.amps[self.qb_names[0]])
nr_lengths = len(self.lengths[self.qb_names[0]])
# make matrix out of vector
data_reshaped_no_cp = {qb: np.reshape(deepcopy(
pdd['data_to_fit'][qb][
:, :pdd['data_to_fit'][qb].shape[1]-nr_cp]).flatten(),
(nr_amps, nr_lengths)) for qb in self.qb_names}
pdd['data_reshaped_no_cp'] = data_reshaped_no_cp
        pdd['mask'] = {qb: np.ones(nr_amps, dtype=bool)
                       for qb in self.qb_names}
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
exp_mod = fit_mods.ExponentialModel()
for qb in self.qb_names:
for i, data in enumerate(pdd['data_reshaped_no_cp'][qb]):
self.fit_dicts[f'exp_fit_{qb}_amp_{i}'] = {
'model': exp_mod,
'fit_xvals': {'x': self.lengths[qb]},
'fit_yvals': {'data': data}}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['T1'] = {}
pdd['T1_err'] = {}
for qb in self.qb_names:
pdd['T1'][qb] = np.array([
abs(self.fit_res[f'exp_fit_{qb}_amp_{i}'].best_values['decay'])
for i in range(len(self.amps[qb]))])
pdd['T1_err'][qb] = np.array([
self.fit_res[f'exp_fit_{qb}_amp_{i}'].params['decay'].stderr
for i in range(len(self.amps[qb]))])
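            # Mask out amplitudes where the exponential fit is unreliable
            # (stderr at least ten times the fitted T1, or stderr undefined).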
for i in range(len(self.amps[qb])):
try:
if pdd['T1_err'][qb][i] >= 10 * pdd['T1'][qb][i]:
pdd['mask'][qb][i] = False
except TypeError:
pdd['mask'][qb][i] = False
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
for p, param_values in enumerate([self.amps, self.freqs]):
if param_values is None:
continue
suffix = '_amp' if p == 0 else '_freq'
mask = pdd['mask'][qb]
xlabel = r'Flux pulse amplitude' if p == 0 else \
r'Derived qubit frequency'
if self.do_fitting:
# Plot T1 vs flux pulse amplitude
label = f'T1_fit_{qb}{suffix}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'plotfn': self.plot_line,
'linestyle': '-',
'xvals': param_values[qb][mask],
'yvals': pdd['T1'][qb][mask],
'yerr': pdd['T1_err'][qb][mask],
'xlabel': xlabel,
'xunit': 'V' if p == 0 else 'Hz',
'ylabel': r'T1',
'yunit': 's',
'color': 'blue',
}
                # Plot rotated integrated average in dependence of flux pulse
# amplitude and length
label = f'T1_color_plot_{qb}{suffix}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'plotfn': self.plot_colorxy,
'linestyle': '-',
'xvals': param_values[qb][mask],
'yvals': self.lengths[qb],
'zvals': np.transpose(pdd['data_reshaped_no_cp'][qb][mask]),
'xlabel': xlabel,
'xunit': 'V' if p == 0 else 'Hz',
'ylabel': r'Flux pulse length',
'yunit': 's',
'zlabel': r'Excited state population'
}
# Plot population loss for the first flux pulse length as a
# function of flux pulse amplitude
label = f'Pop_loss_{qb}{suffix}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'plotfn': self.plot_line,
'linestyle': '-',
'xvals': param_values[qb][mask],
'yvals': 1 - pdd['data_reshaped_no_cp'][qb][:, 0][mask],
'xlabel': xlabel,
'xunit': 'V' if p == 0 else 'Hz',
'ylabel': r'Pop. loss @ {:.0f} ns'.format(
self.lengths[qb][0]/1e-9
),
'yunit': '',
}
# Plot all fits in single figure
if self.options_dict.get('all_fits', False) and self.do_fitting:
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i in range(len(self.amps[qb])):
color = colormap(i/(len(self.amps[qb])-1))
label = f'exp_fit_{qb}_amp_{i}'
fitid = param_values[qb][i]
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'fig_id': f'T1_fits_{qb}',
'xlabel': r'Flux pulse length',
'xunit': 's',
'ylabel': r'Excited state population',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': f'freq={fitid:.4f}' if p == 1
else f'amp={fitid:.4f}',
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
label = f'freq_scatter_{qb}_{i}'
self.plot_dicts[label] = {
'fig_id': f'T1_fits_{qb}',
'plotfn': self.plot_line,
'xvals': self.lengths[qb],
'linestyle': '',
'yvals': pdd['data_reshaped_no_cp'][qb][i, :],
'color': color,
'setlabel': f'freq={fitid:.4f}' if p == 1
else f'amp={fitid:.4f}',
}
class T2FrequencySweepAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
nr_cp = self.num_cal_points
nr_amps = len(self.metadata['amplitudes'])
nr_lengths = len(self.metadata['flux_lengths'])
nr_phases = len(self.metadata['phases'])
# make matrix out of vector
data_reshaped_no_cp = {qb: np.reshape(
deepcopy(pdd['data_to_fit'][qb][
:, :pdd['data_to_fit'][qb].shape[1]-nr_cp]).flatten(),
(nr_amps, nr_lengths, nr_phases)) for qb in self.qb_names}
pdd['data_reshaped_no_cp'] = data_reshaped_no_cp
if self.metadata['use_cal_points']:
pdd['cal_point_data'] = {qb: deepcopy(
pdd['data_to_fit'][qb][
len(pdd['data_to_fit'][qb])-nr_cp:]) for qb in self.qb_names}
        pdd['mask'] = {qb: np.ones(nr_amps, dtype=bool)
                       for qb in self.qb_names}
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
nr_amps = len(self.metadata['amplitudes'])
for qb in self.qb_names:
for i in range(nr_amps):
for j, data in enumerate(pdd['data_reshaped_no_cp'][qb][i]):
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.metadata['phases'],
data=data,
freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts[f'cos_fit_{qb}_{i}_{j}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.metadata['phases']},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['T2'] = {}
pdd['T2_err'] = {}
pdd['phase_contrast'] = {}
nr_lengths = len(self.metadata['flux_lengths'])
nr_amps = len(self.metadata['amplitudes'])
for qb in self.qb_names:
pdd['phase_contrast'][qb] = {}
exp_mod = fit_mods.ExponentialModel()
for i in range(nr_amps):
                pdd['phase_contrast'][qb][f'amp_{i}'] = np.array([
                    self.fit_res[f'cos_fit_{qb}_{i}_{j}'].best_values[
                        'amplitude'] for j in range(nr_lengths)])
                self.fit_dicts[f'exp_fit_{qb}_{i}'] = {
                    'model': exp_mod,
                    'fit_xvals': {'x': self.metadata['flux_lengths']},
                    'fit_yvals': {
                        'data': pdd['phase_contrast'][qb][f'amp_{i}']}}
self.run_fitting()
pdd['T2'][qb] = np.array([
abs(self.fit_res[f'exp_fit_{qb}_{i}'].best_values['decay'])
for i in range(len(self.metadata['amplitudes']))])
            # The mask was initialized as a boolean array in process_data;
            # here only the unreliable entries are switched off.
            for i in range(len(self.metadata['amplitudes'])):
                try:
                    if self.fit_res[f'exp_fit_{qb}_{i}']\
                            .params['decay'].stderr >= 1e-5:
                        pdd['mask'][qb][i] = False
                except TypeError:
                    pdd['mask'][qb][i] = False
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
mask = pdd['mask'][qb]
label = f'T2_fit_{qb}'
xvals = self.metadata['amplitudes'][mask] if \
self.metadata['frequencies'] is None else \
self.metadata['frequencies'][mask]
xlabel = r'Flux pulse amplitude' if \
self.metadata['frequencies'] is None else \
r'Derived qubit frequency'
self.plot_dicts[label] = {
'plotfn': self.plot_line,
'linestyle': '-',
'xvals': xvals,
'yvals': pdd['T2'][qb][mask],
'xlabel': xlabel,
'xunit': 'V' if self.metadata['frequencies'] is None else 'Hz',
'ylabel': r'T2',
'yunit': 's',
'color': 'blue',
}
# Plot all fits in single figure
if not self.options_dict.get('all_fits', False):
continue
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i in range(len(self.metadata['amplitudes'])):
                color = colormap(i/(len(self.metadata['amplitudes'])-1))
label = f'exp_fit_{qb}_amp_{i}'
freqs = self.metadata['frequencies'] is not None
fitid = self.metadata.get('frequencies',
self.metadata['amplitudes'])[i]
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'T2_fits_{qb}',
'xlabel': r'Flux pulse length',
'xunit': 's',
'ylabel': r'Excited state population',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': f'freq={fitid:.4f}' if freqs
else f'amp={fitid:.4f}',
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
label = f'freq_scatter_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'T2_fits_{qb}',
'plotfn': self.plot_line,
'xvals': self.metadata['phases'],
'linestyle': '',
'yvals': pdd['data_reshaped_no_cp'][qb][i,:],
'color': color,
'setlabel': f'freq={fitid:.4f}' if freqs
else f'amp={fitid:.4f}',
}
class MeasurementInducedDephasingAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
rdd = self.raw_data_dict
pdd = self.proc_data_dict
pdd['data_reshaped'] = {qb: [] for qb in pdd['data_to_fit']}
pdd['amps_reshaped'] = np.unique(self.metadata['hard_sweep_params']['ro_amp_scale']['values'])
pdd['phases_reshaped'] = []
for amp in pdd['amps_reshaped']:
mask = self.metadata['hard_sweep_params']['ro_amp_scale']['values'] == amp
pdd['phases_reshaped'].append(self.metadata['hard_sweep_params']['phase']['values'][mask])
for qb in self.qb_names:
pdd['data_reshaped'][qb].append(pdd['data_to_fit'][qb][:len(mask)][mask])
def prepare_fitting(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
self.fit_dicts = OrderedDict()
for qb in self.qb_names:
for i, data in enumerate(pdd['data_reshaped'][qb]):
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=pdd['phases_reshaped'][i],
data=data, freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts[f'cos_fit_{qb}_{i}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': pdd['phases_reshaped'][i]},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['phase_contrast'] = {}
pdd['phase_offset'] = {}
pdd['sigma'] = {}
pdd['sigma_err'] = {}
pdd['a'] = {}
pdd['a_err'] = {}
pdd['c'] = {}
pdd['c_err'] = {}
for qb in self.qb_names:
pdd['phase_contrast'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['amplitude']
for i, _ in enumerate(pdd['data_reshaped'][qb])])
pdd['phase_offset'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['phase']
for i, _ in enumerate(pdd['data_reshaped'][qb])])
pdd['phase_offset'][qb] += np.pi * (pdd['phase_contrast'][qb] < 0)
pdd['phase_offset'][qb] = (pdd['phase_offset'][qb] + np.pi) % (2 * np.pi) - np.pi
pdd['phase_offset'][qb] = 180*np.unwrap(pdd['phase_offset'][qb])/np.pi
pdd['phase_contrast'][qb] = np.abs(pdd['phase_contrast'][qb])
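            # The phase contrast vs. readout amplitude is fitted with a
            # Gaussian centred at zero (its width sigma quantifies the
            # measurement-induced dephasing); the phase offset is fitted with
            # a quadratic a*x**2 + c (the linear term b is fixed to zero).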
gauss_mod = lmfit.models.GaussianModel()
self.fit_dicts[f'phase_contrast_fit_{qb}'] = {
'model': gauss_mod,
'guess_dict': {'center': {'value': 0, 'vary': False}},
'fit_xvals': {'x': pdd['amps_reshaped']},
'fit_yvals': {'data': pdd['phase_contrast'][qb]}}
quadratic_mod = lmfit.models.QuadraticModel()
self.fit_dicts[f'phase_offset_fit_{qb}'] = {
'model': quadratic_mod,
'guess_dict': {'b': {'value': 0, 'vary': False}},
'fit_xvals': {'x': pdd['amps_reshaped']},
'fit_yvals': {'data': pdd['phase_offset'][qb]}}
self.run_fitting()
self.save_fit_results()
pdd['sigma'][qb] = self.fit_res[f'phase_contrast_fit_{qb}'].best_values['sigma']
pdd['sigma_err'][qb] = self.fit_res[f'phase_contrast_fit_{qb}'].params['sigma']. \
stderr
pdd['a'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].best_values['a']
pdd['a_err'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].params['a'].stderr
pdd['c'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].best_values['c']
pdd['c_err'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].params['c'].stderr
pdd['sigma_err'][qb] = float('nan') if pdd['sigma_err'][qb] is None \
else pdd['sigma_err'][qb]
pdd['a_err'][qb] = float('nan') if pdd['a_err'][qb] is None else pdd['a_err'][qb]
pdd['c_err'][qb] = float('nan') if pdd['c_err'][qb] is None else pdd['c_err'][qb]
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
phases_equal = True
for phases in pdd['phases_reshaped'][1:]:
if not np.all(phases == pdd['phases_reshaped'][0]):
phases_equal = False
break
for qb in self.qb_names:
if phases_equal:
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'plotfn': self.plot_colorxy,
'xvals': pdd['phases_reshaped'][0],
'yvals': pdd['amps_reshaped'],
'zvals': pdd['data_reshaped'][qb],
'xlabel': r'Pulse phase, $\phi$',
'xunit': 'deg',
'ylabel': r'Readout pulse amplitude scale, $V_{RO}/V_{ref}$',
'yunit': '',
'zlabel': 'Excited state population',
}
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i, amp in enumerate(pdd['amps_reshaped']):
color = colormap(i/(len(pdd['amps_reshaped'])-1))
label = f'cos_data_{qb}_{i}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'amplitude_crossections_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['phases_reshaped'][i],
'yvals': pdd['data_reshaped'][qb][i],
'xlabel': r'Pulse phase, $\phi$',
'xunit': 'deg',
'ylabel': 'Excited state population',
'linestyle': '',
'color': color,
'setlabel': f'amp={amp:.4f}',
'do_legend': True,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
if self.do_fitting:
for i, amp in enumerate(pdd['amps_reshaped']):
color = colormap(i/(len(pdd['amps_reshaped'])-1))
label = f'cos_fit_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'amplitude_crossections_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': f'fit, amp={amp:.4f}',
}
# Phase contrast
self.plot_dicts[f'phase_contrast_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': 200*pdd['phase_contrast'][qb],
'xlabel': r'Readout pulse amplitude scale, $V_{RO}/V_{ref}$',
'xunit': '',
'ylabel': 'Phase contrast',
'yunit': '%',
'linestyle': '',
'color': 'k',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_contrast_fit_{qb}'] = {
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': 200*self.fit_res[f'phase_contrast_fit_{qb}'].best_fit,
'color': 'r',
'marker': '',
'setlabel': 'fit',
'do_legend': True,
}
self.plot_dicts[f'phase_contrast_labels_{qb}'] = {
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': 200*pdd['phase_contrast'][qb],
'marker': '',
'linestyle': '',
'setlabel': r'$\sigma = ({:.5f} \pm {:.5f})$ V'.
format(pdd['sigma'][qb], pdd['sigma_err'][qb]),
'do_legend': True,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
# Phase offset
self.plot_dicts[f'phase_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': pdd['phase_offset'][qb],
'xlabel': r'Readout pulse amplitude scale, $V_{RO}/V_{ref}$',
'xunit': '',
'ylabel': 'Phase offset',
'yunit': 'deg',
'linestyle': '',
'color': 'k',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_offset_fit_{qb}'] = {
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': self.fit_res[f'phase_offset_fit_{qb}'].best_fit,
'color': 'r',
'marker': '',
'setlabel': 'fit',
'do_legend': True,
}
self.plot_dicts[f'phase_offset_labels_{qb}'] = {
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': pdd['phase_offset'][qb],
'marker': '',
'linestyle': '',
'setlabel': r'$a = {:.0f} \pm {:.0f}$ deg/V${{}}^2$'.
format(pdd['a'][qb], pdd['a_err'][qb]) + '\n' +
r'$c = {:.1f} \pm {:.1f}$ deg'.
format(pdd['c'][qb], pdd['c_err'][qb]),
'do_legend': True,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
class DriveCrosstalkCancellationAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
if self.sp is None:
raise ValueError('This analysis needs a SweepPoints '
'class instance.')
pdd = self.proc_data_dict
# get the ramsey phases as the values of the first sweep parameter
# in the 2nd sweep dimension.
# !!! This assumes all qubits have the same ramsey phases !!!
pdd['ramsey_phases'] = self.sp.get_sweep_params_property('values', 1)
pdd['qb_sweep_points'] = {}
pdd['qb_sweep_param'] = {}
for k, v in self.sp.get_sweep_dimension(0).items():
if k == 'phase':
continue
qb, param = k.split('.')
pdd['qb_sweep_points'][qb] = v[0]
pdd['qb_sweep_param'][qb] = (param, v[1], v[2])
pdd['qb_msmt_vals'] = {}
pdd['qb_cal_vals'] = {}
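        # Reshape the flattened (hard-swept) data into a 2d array of
        # (first-dimension sweep points) x (ramsey phases); the final
        # num_cal_points segments are split off as calibration points.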
for qb, data in pdd['data_to_fit'].items():
pdd['qb_msmt_vals'][qb] = data[:, :-self.num_cal_points].reshape(
len(pdd['qb_sweep_points'][qb]), len(pdd['ramsey_phases']))
pdd['qb_cal_vals'][qb] = data[0, -self.num_cal_points:]
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
for qb in self.qb_names:
for i, data in enumerate(pdd['qb_msmt_vals'][qb]):
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=pdd['ramsey_phases'],
data=data, freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts[f'cos_fit_{qb}_{i}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': pdd['ramsey_phases']},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['phase_contrast'] = {}
pdd['phase_offset'] = {}
for qb in self.qb_names:
pdd['phase_contrast'][qb] = np.array([
2*self.fit_res[f'cos_fit_{qb}_{i}'].best_values['amplitude']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['phase']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
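            # Convert the fitted phase to degrees, add 180 deg where the
            # fitted cosine amplitude came out negative, and wrap into
            # (-180, 180] so the offsets are comparable across sweep points.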
pdd['phase_offset'][qb] *= 180/np.pi
pdd['phase_offset'][qb] += 180 * (pdd['phase_contrast'][qb] < 0)
pdd['phase_offset'][qb] = (pdd['phase_offset'][qb] + 180) % 360 - 180
pdd['phase_contrast'][qb] = np.abs(pdd['phase_contrast'][qb])
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'plotfn': self.plot_colorxy,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['qb_sweep_points'][qb],
'zvals': pdd['qb_msmt_vals'][qb],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': pdd['qb_sweep_param'][qb][2],
'yunit': pdd['qb_sweep_param'][qb][1],
'zlabel': 'Excited state population',
}
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i, pval in enumerate(pdd['qb_sweep_points'][qb]):
if i == len(pdd['qb_sweep_points'][qb]) - 1:
legendlabel='data, ref.'
else:
legendlabel = f'data, {pdd["qb_sweep_param"][qb][0]}='\
f'{pval:.4f}{pdd["qb_sweep_param"][qb][1]}'
color = colormap(i/(len(pdd['qb_sweep_points'][qb])-1))
label = f'cos_data_{qb}_{i}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['qb_msmt_vals'][qb][i],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': 'Excited state population',
'linestyle': '',
'color': color,
'setlabel': legendlabel,
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
if self.do_fitting:
for i, pval in enumerate(pdd['qb_sweep_points'][qb]):
if i == len(pdd['qb_sweep_points'][qb]) - 1:
legendlabel = 'fit, ref.'
else:
legendlabel = f'fit, {pdd["qb_sweep_param"][qb][0]}='\
f'{pval:.4f}{pdd["qb_sweep_param"][qb][1]}'
color = colormap(i/(len(pdd['qb_sweep_points'][qb])-1))
label = f'cos_fit_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'do_legend': False,
# 'setlabel': legendlabel
}
# Phase contrast
self.plot_dicts[f'phase_contrast_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['qb_sweep_points'][qb][:-1],
'yvals': pdd['phase_contrast'][qb][:-1] * 100,
'xlabel': pdd['qb_sweep_param'][qb][2],
'xunit': pdd['qb_sweep_param'][qb][1],
'ylabel': 'Phase contrast',
'yunit': '%',
'linestyle': '-',
'marker': 'o',
'color': 'C0',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_contrast_ref_{qb}'] = {
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_hlines,
'xmin': pdd['qb_sweep_points'][qb][:-1].min(),
'xmax': pdd['qb_sweep_points'][qb][:-1].max(),
'y': pdd['phase_contrast'][qb][-1] * 100,
'linestyle': '--',
'colors': '0.6',
'setlabel': 'ref',
'do_legend': True,
}
# Phase offset
self.plot_dicts[f'phase_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['qb_sweep_points'][qb][:-1],
'yvals': pdd['phase_offset'][qb][:-1],
'xlabel': pdd['qb_sweep_param'][qb][2],
'xunit': pdd['qb_sweep_param'][qb][1],
'ylabel': 'Phase offset',
'yunit': 'deg',
'linestyle': '-',
'marker': 'o',
'color': 'C0',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_offset_ref_{qb}'] = {
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_hlines,
'xmin': pdd['qb_sweep_points'][qb][:-1].min(),
'xmax': pdd['qb_sweep_points'][qb][:-1].max(),
'y': pdd['phase_offset'][qb][-1],
'linestyle': '--',
'colors': '0.6',
'setlabel': 'ref',
'do_legend': True,
}
class FluxlineCrosstalkAnalysis(MultiQubit_TimeDomain_Analysis):
"""Analysis for the measure_fluxline_crosstalk measurement.
The measurement involves Ramsey measurements on a set of crosstalk qubits,
which have been brought to a flux-sensitive position with a flux pulse.
    The first sweep dimension is the Ramsey phase of these qubits.
In the second sweep dimension, the amplitude of a flux pulse on another
(target) qubit is swept.
The analysis extracts the change in Ramsey phase offset, which gets
converted to a frequency offset due to the flux pulse on the target qubit.
The frequency offset is then converted to a flux offset, which is a measure
of the crosstalk between the target fluxline and the crosstalk qubit.
    The measurement is hard-compressed, meaning the raw data is inherently 1d,
    with one set of calibration points as the final segments. The experimental
    part of the measured values is reshaped to the correct 2d shape for
    the analysis. The sweep points passed into the analysis should still
    reflect the 2d nature of the measurement, i.e. the Ramsey phase values
    should be passed in the first sweep dimension and the target flux pulse
    amplitudes in the second sweep dimension.
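
    Example (illustrative sketch only; keyword arguments such as t_start
    follow the conventions of the base time-domain analysis class and may
    differ in a given setup):

        FluxlineCrosstalkAnalysis(qb_names=['qb1', 'qb2'],
                                  t_start='20200101_123456',
                                  options_dict={'skip_qb_freq_fits': False})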
"""
def __init__(self, qb_names, *args, **kwargs):
params_dict = {f'{qbn}.amp_to_freq_model':
f'Instrument settings.{qbn}.fit_ge_freq_from_flux_pulse_amp'
for qbn in qb_names}
kwargs['params_dict'] = kwargs.get('params_dict', {})
kwargs['params_dict'].update(params_dict)
super().__init__(qb_names, *args, **kwargs)
def process_data(self):
super().process_data()
if self.sp is None:
raise ValueError('This analysis needs a SweepPoints '
'class instance.')
pdd = self.proc_data_dict
pdd['ramsey_phases'] = self.sp.get_sweep_params_property('values', 0)
pdd['target_amps'] = self.sp.get_sweep_params_property('values', 1)
pdd['target_fluxpulse_length'] = \
self.get_param_value('target_fluxpulse_length')
pdd['crosstalk_qubits_amplitudes'] = \
self.get_param_value('crosstalk_qubits_amplitudes')
pdd['qb_msmt_vals'] = {qb:
pdd['data_to_fit'][qb][:, :-self.num_cal_points].reshape(
len(pdd['target_amps']), len(pdd['ramsey_phases']))
for qb in self.qb_names}
pdd['qb_cal_vals'] = {
qb: pdd['data_to_fit'][qb][0, -self.num_cal_points:]
for qb in self.qb_names}
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
cos_mod = lmfit.Model(fit_mods.CosFunc)
cos_mod.guess = fit_mods.Cos_guess.__get__(cos_mod, cos_mod.__class__)
for qb in self.qb_names:
for i, data in enumerate(pdd['qb_msmt_vals'][qb]):
self.fit_dicts[f'cos_fit_{qb}_{i}'] = {
'model': cos_mod,
'guess_dict': {'frequency': {'value': 1 / 360,
'vary': False}},
'fit_xvals': {'t': pdd['ramsey_phases']},
'fit_yvals': {'data': data}}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['phase_contrast'] = {}
pdd['phase_offset'] = {}
pdd['freq_offset'] = {}
pdd['freq'] = {}
self.skip_qb_freq_fits = self.get_param_value('skip_qb_freq_fits', False)
if not self.skip_qb_freq_fits:
pdd['flux'] = {}
for qb in self.qb_names:
pdd['phase_contrast'][qb] = np.array([
2 * self.fit_res[f'cos_fit_{qb}_{i}'].best_values['amplitude']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['phase']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] *= 180 / np.pi
pdd['phase_offset'][qb] += 180 * (pdd['phase_contrast'][qb] < 0)
pdd['phase_offset'][qb] = (pdd['phase_offset'][qb] + 180) % 360 - 180
pdd['phase_offset'][qb] = \
np.unwrap(pdd['phase_offset'][qb] / 180 * np.pi) * 180 / np.pi
pdd['phase_contrast'][qb] = np.abs(pdd['phase_contrast'][qb])
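            # The Ramsey phase offset (in deg) accumulated over the flux pulse
            # of length T corresponds to a frequency offset
            # delta_f = delta_phi / 360 / T. The linear fit below is used to
            # subtract the residual offset at zero target amplitude (f0).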
pdd['freq_offset'][qb] = pdd['phase_offset'][qb] / 360 / pdd[
'target_fluxpulse_length']
fr = lmfit.Model(lambda a, f_a=1, f0=0: a * f_a + f0).fit(
data=pdd['freq_offset'][qb], a=pdd['target_amps'])
pdd['freq_offset'][qb] -= fr.best_values['f0']
if not self.skip_qb_freq_fits:
mpars = eval(self.raw_data_dict[f'{qb}.amp_to_freq_model'])
freq_idle = fit_mods.Qubit_dac_to_freq(
pdd['crosstalk_qubits_amplitudes'].get(qb, 0), **mpars)
pdd['freq'][qb] = pdd['freq_offset'][qb] + freq_idle
mpars.update({'V_per_phi0': 1, 'dac_sweet_spot': 0})
pdd['flux'][qb] = fit_mods.Qubit_freq_to_dac(
pdd['freq'][qb], **mpars)
# fit fitted results to linear models
lin_mod = lmfit.Model(lambda x, a=1, b=0: a*x + b)
def guess(model, data, x, **kwargs):
a_guess = (data[-1] - data[0])/(x[-1] - x[0])
b_guess = data[0] - x[0]*a_guess
return model.make_params(a=a_guess, b=b_guess)
lin_mod.guess = guess.__get__(lin_mod, lin_mod.__class__)
keys_to_fit = []
for qb in self.qb_names:
for param in ['phase_offset', 'freq_offset', 'flux']:
if param == 'flux' and self.skip_qb_freq_fits:
continue
key = f'{param}_fit_{qb}'
self.fit_dicts[key] = {
'model': lin_mod,
'fit_xvals': {'x': pdd['target_amps']},
'fit_yvals': {'data': pdd[param][qb]}}
keys_to_fit.append(key)
self.run_fitting(keys_to_fit=keys_to_fit)
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'plotfn': self.plot_colorxy,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['target_amps'],
'zvals': pdd['qb_msmt_vals'][qb],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'yunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'zlabel': 'Excited state population',
}
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i, pval in enumerate(pdd['target_amps']):
legendlabel = f'data, amp. = {pval:.4f} V'
color = colormap(i / (len(pdd['target_amps']) - 1))
label = f'cos_data_{qb}_{i}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['qb_msmt_vals'][qb][i],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': 'Excited state population',
'linestyle': '',
'color': color,
'setlabel': legendlabel,
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
if self.do_fitting:
for i, pval in enumerate(pdd['target_amps']):
legendlabel = f'fit, amp. = {pval:.4f} V'
color = colormap(i / (len(pdd['target_amps']) - 1))
label = f'cos_fit_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': legendlabel,
'do_legend': False,
}
# Phase contrast
self.plot_dicts[f'phase_contrast_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['phase_contrast'][qb] * 100,
'xlabel':self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'xunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'ylabel': 'Phase contrast',
'yunit': '%',
'linestyle': '-',
'marker': 'o',
'color': 'C0',
}
# Phase offset
self.plot_dicts[f'phase_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['phase_offset'][qb],
'xlabel':self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'xunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'ylabel': 'Phase offset',
'yunit': 'deg',
'linestyle': 'none',
'marker': 'o',
'color': 'C0',
}
# Frequency offset
self.plot_dicts[f'freq_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'freq_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['freq_offset'][qb],
'xlabel':self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'xunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'ylabel': 'Freq. offset, $\\Delta f$',
'yunit': 'Hz',
'linestyle': 'none',
'marker': 'o',
'color': 'C0',
}
if not self.skip_qb_freq_fits:
# Flux
self.plot_dicts[f'flux_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'flux_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['flux'][qb],
'xlabel': self.sp[1]['target_amp'][2],
'xunit': self.sp[1]['target_amp'][1],
'ylabel': 'Flux, $\\Phi$',
'yunit': '$\\Phi_0$',
'linestyle': 'none',
'marker': 'o',
'color': 'C0',
}
for param in ['phase_offset', 'freq_offset', 'flux']:
if param == 'flux' and self.skip_qb_freq_fits:
continue
self.plot_dicts[f'{param}_fit_{qb}'] = {
'ax_id': f'{param}_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[f'{param}_fit_{qb}'],
'plot_init': self.options_dict.get('plot_init', False),
'linestyle': '-',
'marker': '',
'color': 'C1',
}
class RabiAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names, *args, **kwargs):
params_dict = {}
for qbn in qb_names:
s = 'Instrument settings.'+qbn
for trans_name in ['ge', 'ef']:
params_dict[f'{trans_name}_amp180_'+qbn] = \
s+f'.{trans_name}_amp180'
params_dict[f'{trans_name}_amp90scale_'+qbn] = \
s+f'.{trans_name}_amp90_scale'
kwargs['params_dict'] = params_dict
kwargs['numeric_params'] = list(params_dict)
super().__init__(qb_names, *args, **kwargs)
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
data = self.proc_data_dict['data_to_fit'][qbn]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
cos_mod = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=sweep_points, data=data)
guess_pars['amplitude'].vary = True
guess_pars['amplitude'].min = -10
guess_pars['offset'].vary = True
guess_pars['frequency'].vary = True
guess_pars['phase'].vary = True
self.set_user_guess_pars(guess_pars)
key = 'cos_fit_' + qbn
self.fit_dicts[key] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
fit_res = self.fit_dicts['cos_fit_' + qbn]['fit_res']
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
self.proc_data_dict['analysis_params_dict'][qbn] = \
self.get_amplitudes(fit_res=fit_res, sweep_points=sweep_points)
self.save_processed_data(key='analysis_params_dict')
def get_amplitudes(self, fit_res, sweep_points):
# Extract the best fitted frequency and phase.
freq_fit = fit_res.best_values['frequency']
phase_fit = fit_res.best_values['phase']
freq_std = fit_res.params['frequency'].stderr
phase_std = fit_res.params['phase'].stderr
        # If the fitted phase is very close to zero, snap it to exactly zero;
        # a small residual phase would otherwise skew the pulse-amplitude
        # candidates computed below.
        if np.abs(phase_fit) < 0.1:
            phase_fit = 0
# If phase_fit<1, the piHalf amplitude<0.
if phase_fit < 1:
log.info('The data could not be fitted correctly. '
'The fitted phase "%s" <1, which gives '
'negative piHalf '
'amplitude.' % phase_fit)
stepsize = sweep_points[1] - sweep_points[0]
if freq_fit > 2 * stepsize:
log.info('The data could not be fitted correctly. The '
'frequency "%s" is too high.' % freq_fit)
n = np.arange(-2, 10)
piPulse_vals = (n*np.pi - phase_fit)/(2*np.pi*freq_fit)
piHalfPulse_vals = (n*np.pi + np.pi/2 - phase_fit)/(2*np.pi*freq_fit)
# find piHalfPulse
try:
piHalfPulse = \
np.min(piHalfPulse_vals[piHalfPulse_vals >= sweep_points[1]])
n_piHalf_pulse = n[piHalfPulse_vals==piHalfPulse]
except ValueError:
piHalfPulse = np.asarray([])
if piHalfPulse.size == 0 or piHalfPulse > max(sweep_points):
i = 0
            while (i < piHalfPulse_vals.size and
                   piHalfPulse_vals[i] < min(sweep_points)):
                i += 1
piHalfPulse = piHalfPulse_vals[i]
n_piHalf_pulse = n[i]
# find piPulse
try:
if piHalfPulse.size != 0:
piPulse = \
np.min(piPulse_vals[piPulse_vals >= piHalfPulse])
else:
piPulse = np.min(piPulse_vals[piPulse_vals >= 0.001])
            n_pi_pulse = n[piPulse_vals == piPulse]
except ValueError:
piPulse = np.asarray([])
if piPulse.size == 0:
i = 0
            while (i < piPulse_vals.size and
                   piPulse_vals[i] < min(sweep_points)):
                i += 1
piPulse = piPulse_vals[i]
n_pi_pulse = n[i]
try:
freq_idx = fit_res.var_names.index('frequency')
phase_idx = fit_res.var_names.index('phase')
if fit_res.covar is not None:
cov_freq_phase = fit_res.covar[freq_idx, phase_idx]
else:
cov_freq_phase = 0
except ValueError:
cov_freq_phase = 0
try:
piPulse_std = self.calculate_pulse_stderr(
f=freq_fit,
phi=phase_fit,
f_err=freq_std,
phi_err=phase_std,
period_num=n_pi_pulse,
cov=cov_freq_phase)
piHalfPulse_std = self.calculate_pulse_stderr(
f=freq_fit,
phi=phase_fit,
f_err=freq_std,
phi_err=phase_std,
period_num=n_piHalf_pulse,
cov=cov_freq_phase)
except Exception as e:
log.error(e)
piPulse_std = 0
piHalfPulse_std = 0
rabi_amplitudes = {'piPulse': piPulse,
'piPulse_stderr': piPulse_std,
'piHalfPulse': piHalfPulse,
'piHalfPulse_stderr': piHalfPulse_std}
return rabi_amplitudes
def calculate_pulse_stderr(self, f, phi, f_err, phi_err,
period_num, cov=0):
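        # Propagate the fitted frequency and phase uncertainties (and their
        # covariance) to an uncertainty on the extracted pulse amplitude,
        # using a first-order approximation.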
x = period_num + phi
return np.sqrt((f_err*x/(2*np.pi*(f**2)))**2 +
(phi_err/(2*np.pi*f))**2 -
2*(cov**2)*x/((2*np.pi*(f**3))**2))[0]
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
for qbn in self.qb_names:
base_plot_name = 'Rabi_' + qbn
self.prepare_projected_data_plot(
fig_name=base_plot_name,
data=self.proc_data_dict['data_to_fit'][qbn],
plot_name_suffix=qbn+'fit',
qb_name=qbn)
fit_res = self.fit_dicts['cos_fit_' + qbn]['fit_res']
self.plot_dicts['fit_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': fit_res,
'setlabel': 'cosine fit',
'color': 'r',
'do_legend': True,
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
rabi_amplitudes = self.proc_data_dict['analysis_params_dict']
self.plot_dicts['piamp_marker_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': np.array([rabi_amplitudes[qbn]['piPulse']]),
'yvals': np.array([fit_res.model.func(
rabi_amplitudes[qbn]['piPulse'],
**fit_res.best_values)]),
'setlabel': '$\pi$-Pulse amp',
'color': 'r',
'marker': 'o',
'line_kws': {'markersize': 10},
'linestyle': '',
'do_legend': True,
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
self.plot_dicts['piamp_hline_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_hlines,
'y': [fit_res.model.func(
rabi_amplitudes[qbn]['piPulse'],
**fit_res.best_values)],
'xmin': self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][0],
'xmax': self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][-1],
'colors': 'gray'}
self.plot_dicts['pihalfamp_marker_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': np.array([rabi_amplitudes[qbn]['piHalfPulse']]),
'yvals': np.array([fit_res.model.func(
rabi_amplitudes[qbn]['piHalfPulse'],
**fit_res.best_values)]),
'setlabel': '$\pi /2$-Pulse amp',
'color': 'm',
'marker': 'o',
'line_kws': {'markersize': 10},
'linestyle': '',
'do_legend': True,
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
self.plot_dicts['pihalfamp_hline_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_hlines,
'y': [fit_res.model.func(
rabi_amplitudes[qbn]['piHalfPulse'],
**fit_res.best_values)],
'xmin': self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][0],
'xmax': self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][-1],
'colors': 'gray'}
trans_name = 'ef' if 'f' in self.data_to_fit[qbn] else 'ge'
old_pipulse_val = self.raw_data_dict[
f'{trans_name}_amp180_'+qbn]
                # NaN check (NaN != NaN): fall back to 0 if no previous value
                # was stored in the instrument settings.
                if old_pipulse_val != old_pipulse_val:
                    old_pipulse_val = 0
old_pihalfpulse_val = self.raw_data_dict[
f'{trans_name}_amp90scale_'+qbn]
if old_pihalfpulse_val != old_pihalfpulse_val:
old_pihalfpulse_val = 0
old_pihalfpulse_val *= old_pipulse_val
textstr = (' $\pi-Amp$ = {:.3f} V'.format(
rabi_amplitudes[qbn]['piPulse']) +
' $\pm$ {:.3f} V '.format(
rabi_amplitudes[qbn]['piPulse_stderr']) +
'\n$\pi/2-Amp$ = {:.3f} V '.format(
rabi_amplitudes[qbn]['piHalfPulse']) +
' $\pm$ {:.3f} V '.format(
rabi_amplitudes[qbn]['piHalfPulse_stderr']) +
'\n $\pi-Amp_{old}$ = ' + '{:.3f} V '.format(
old_pipulse_val) +
'\n$\pi/2-Amp_{old}$ = ' + '{:.3f} V '.format(
old_pihalfpulse_val))
self.plot_dicts['text_msg_' + qbn] = {
'fig_id': base_plot_name,
'ypos': -0.2,
'xpos': 0,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
class T1Analysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names, *args, **kwargs):
params_dict = {}
for qbn in qb_names:
s = 'Instrument settings.'+qbn
for trans_name in ['ge', 'ef']:
params_dict[f'{trans_name}_T1_'+qbn] = s+'.T1{}'.format(
'_ef' if trans_name == 'ef' else '')
kwargs['params_dict'] = params_dict
kwargs['numeric_params'] = list(params_dict)
super().__init__(qb_names, *args, **kwargs)
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
data = self.proc_data_dict['data_to_fit'][qbn]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
exp_decay_mod = lmfit.Model(fit_mods.ExpDecayFunc)
guess_pars = fit_mods.exp_dec_guess(
model=exp_decay_mod, data=data, t=sweep_points)
guess_pars['amplitude'].vary = True
guess_pars['tau'].vary = True
if self.options_dict.get('vary_offset', False):
guess_pars['offset'].vary = True
else:
guess_pars['offset'].value = 0
guess_pars['offset'].vary = False
self.set_user_guess_pars(guess_pars)
key = 'exp_decay_' + qbn
self.fit_dicts[key] = {
'fit_fn': exp_decay_mod.func,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
self.proc_data_dict['analysis_params_dict'][qbn]['T1'] = \
self.fit_dicts['exp_decay_' + qbn]['fit_res'].best_values['tau']
self.proc_data_dict['analysis_params_dict'][qbn]['T1_stderr'] = \
self.fit_dicts['exp_decay_' + qbn]['fit_res'].params[
'tau'].stderr
self.save_processed_data(key='analysis_params_dict')
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
for qbn in self.qb_names:
# rename base plot
base_plot_name = 'T1_' + qbn
self.prepare_projected_data_plot(
fig_name=base_plot_name,
data=self.proc_data_dict['data_to_fit'][qbn],
plot_name_suffix=qbn+'fit',
qb_name=qbn)
self.plot_dicts['fit_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['exp_decay_' + qbn]['fit_res'],
'setlabel': 'exp decay fit',
'do_legend': True,
'color': 'r',
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
trans_name = 'ef' if 'f' in self.data_to_fit[qbn] else 'ge'
old_T1_val = self.raw_data_dict[f'{trans_name}_T1_'+qbn]
if old_T1_val != old_T1_val:
old_T1_val = 0
T1_dict = self.proc_data_dict['analysis_params_dict']
textstr = '$T_1$ = {:.2f} $\mu$s'.format(
T1_dict[qbn]['T1']*1e6) \
+ ' $\pm$ {:.2f} $\mu$s'.format(
T1_dict[qbn]['T1_stderr']*1e6) \
+ '\nold $T_1$ = {:.2f} $\mu$s'.format(old_T1_val*1e6)
self.plot_dicts['text_msg_' + qbn] = {
'fig_id': base_plot_name,
'ypos': -0.2,
'xpos': 0,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
class RamseyAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names, *args, **kwargs):
params_dict = {}
for qbn in qb_names:
s = 'Instrument settings.'+qbn
for trans_name in ['ge', 'ef']:
params_dict[f'{trans_name}_freq_'+qbn] = s+f'.{trans_name}_freq'
kwargs['params_dict'] = params_dict
kwargs['numeric_params'] = list(params_dict)
super().__init__(qb_names, *args, **kwargs)
def prepare_fitting(self):
if self.options_dict.get('fit_gaussian_decay', True):
self.fit_keys = ['exp_decay_', 'gauss_decay_']
else:
self.fit_keys = ['exp_decay_']
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
data = self.proc_data_dict['data_to_fit'][qbn]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
for i, key in enumerate([k + qbn for k in self.fit_keys]):
exp_damped_decay_mod = lmfit.Model(fit_mods.ExpDampOscFunc)
guess_pars = fit_mods.exp_damp_osc_guess(
model=exp_damped_decay_mod, data=data, t=sweep_points,
n_guess=i+1)
guess_pars['amplitude'].vary = False
guess_pars['amplitude'].value = 0.5
guess_pars['frequency'].vary = True
guess_pars['tau'].vary = True
guess_pars['phase'].vary = True
guess_pars['n'].vary = False
guess_pars['oscillation_offset'].vary = \
'f' in self.data_to_fit[qbn]
# guess_pars['exponential_offset'].value = 0.5
guess_pars['exponential_offset'].vary = True
self.set_user_guess_pars(guess_pars)
self.fit_dicts[key] = {
                    'fit_fn': exp_damped_decay_mod.func,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
if 'artificial_detuning' in self.options_dict:
artificial_detuning_dict = OrderedDict(
[(qbn, self.options_dict['artificial_detuning'])
for qbn in self.qb_names])
elif 'artificial_detuning_dict' in self.metadata:
artificial_detuning_dict = self.metadata[
'artificial_detuning_dict']
elif 'artificial_detuning' in self.metadata:
artificial_detuning_dict = OrderedDict(
[(qbn, self.metadata['artificial_detuning'])
for qbn in self.qb_names])
else:
raise ValueError('"artificial_detuning" not found.')
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
for key in [k + qbn for k in self.fit_keys]:
self.proc_data_dict['analysis_params_dict'][qbn][key] = \
OrderedDict()
fit_res = self.fit_dicts[key]['fit_res']
for par in fit_res.params:
if fit_res.params[par].stderr is None:
fit_res.params[par].stderr = 0
trans_name = 'ef' if 'f' in self.data_to_fit[qbn] else 'ge'
old_qb_freq = self.raw_data_dict[f'{trans_name}_freq_'+qbn]
if old_qb_freq != old_qb_freq:
old_qb_freq = 0
self.proc_data_dict['analysis_params_dict'][qbn][key][
'old_qb_freq'] = old_qb_freq
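                # The updated qubit frequency follows from the fitted Ramsey
                # oscillation frequency and the applied artificial detuning:
                # f_new = f_old + delta_artificial - f_Ramsey.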
self.proc_data_dict['analysis_params_dict'][qbn][key][
'new_qb_freq'] = old_qb_freq + \
artificial_detuning_dict[qbn] - \
fit_res.best_values['frequency']
self.proc_data_dict['analysis_params_dict'][qbn][key][
'new_qb_freq_stderr'] = fit_res.params['frequency'].stderr
self.proc_data_dict['analysis_params_dict'][qbn][key][
'T2_star'] = fit_res.best_values['tau']
self.proc_data_dict['analysis_params_dict'][qbn][key][
'T2_star_stderr'] = fit_res.params['tau'].stderr
self.proc_data_dict['analysis_params_dict'][qbn][key][
'artificial_detuning'] = artificial_detuning_dict[qbn]
hdf_group_name_suffix = self.options_dict.get(
'hdf_group_name_suffix', '')
self.save_processed_data(key='analysis_params_dict' +
hdf_group_name_suffix)
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
ramsey_dict = self.proc_data_dict['analysis_params_dict']
for qbn in self.qb_names:
base_plot_name = 'Ramsey_' + qbn
self.prepare_projected_data_plot(
fig_name=base_plot_name,
data=self.proc_data_dict['data_to_fit'][qbn],
plot_name_suffix=qbn+'fit',
qb_name=qbn)
exp_decay_fit_key = self.fit_keys[0] + qbn
old_qb_freq = ramsey_dict[qbn][
exp_decay_fit_key]['old_qb_freq']
textstr = ''
T2_star_str = ''
for i, key in enumerate([k + qbn for k in self.fit_keys]):
fit_res = self.fit_dicts[key]['fit_res']
self.plot_dicts['fit_' + key] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': fit_res,
'setlabel': 'exp decay fit' if i == 0 else
'gauss decay fit',
'do_legend': True,
'color': 'r' if i == 0 else 'C4',
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
if i != 0:
textstr += '\n'
textstr += \
('$f_{{qubit \_ new \_ {{{key}}} }}$ = '.format(
key=('exp' if i == 0 else 'gauss')) +
'{:.6f} GHz '.format(
ramsey_dict[qbn][key]['new_qb_freq']*1e-9) +
'$\pm$ {:.2E} GHz '.format(
ramsey_dict[qbn][key][
'new_qb_freq_stderr']*1e-9))
T2_star_str += \
('\n$T_{{2,{{{key}}} }}^\star$ = '.format(
key=('exp' if i == 0 else 'gauss')) +
'{:.2f} $\mu$s'.format(
fit_res.params['tau'].value*1e6) +
'$\pm$ {:.2f} $\mu$s'.format(
fit_res.params['tau'].stderr*1e6))
textstr += '\n$f_{qubit \_ old}$ = '+'{:.6f} GHz '.format(
old_qb_freq*1e-9)
textstr += ('\n$\Delta f$ = {:.4f} MHz '.format(
(ramsey_dict[qbn][exp_decay_fit_key]['new_qb_freq'] -
old_qb_freq)*1e-6) + '$\pm$ {:.2E} MHz'.format(
self.fit_dicts[exp_decay_fit_key]['fit_res'].params[
'frequency'].stderr*1e-6) +
'\n$f_{Ramsey}$ = '+'{:.4f} MHz $\pm$ {:.2E} MHz'.format(
self.fit_dicts[exp_decay_fit_key]['fit_res'].params[
'frequency'].value*1e-6,
self.fit_dicts[exp_decay_fit_key]['fit_res'].params[
'frequency'].stderr*1e-6))
textstr += T2_star_str
textstr += '\nartificial detuning = {:.2f} MHz'.format(
ramsey_dict[qbn][exp_decay_fit_key][
'artificial_detuning']*1e-6)
self.plot_dicts['text_msg_' + qbn] = {
'fig_id': base_plot_name,
'ypos': -0.2,
'xpos': -0.025,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
self.plot_dicts['half_hline_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_hlines,
'y': 0.5,
'xmin': self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][0],
'xmax': self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][-1],
'colors': 'gray'}
class QScaleAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names, *args, **kwargs):
params_dict = {}
for qbn in qb_names:
s = 'Instrument settings.'+qbn
for trans_name in ['ge', 'ef']:
params_dict[f'{trans_name}_qscale_'+qbn] = \
s+f'.{trans_name}_motzoi'
kwargs['params_dict'] = params_dict
kwargs['numeric_params'] = list(params_dict)
super().__init__(qb_names, *args, **kwargs)
def process_data(self):
super().process_data()
self.proc_data_dict['qscale_data'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['qscale_data'][qbn] = OrderedDict()
sweep_points = deepcopy(self.proc_data_dict['sweep_points_dict'][
qbn]['msmt_sweep_points'])
            # Check whether the sweep points are already repeated 3 times, as
            # required for the qscale analysis: take the first 3 entries and
            # check whether they are all identical. This is needed for
            # backwards compatibility with QudevTransmon.measure_qscale(),
            # which does not (yet) use the SweepPoints object.
unique_sp = np.unique(sweep_points[:3])
if unique_sp.size > 1:
sweep_points = np.repeat(sweep_points, 3)
# replace in proc_data_dict; otherwise plotting in base class fails
self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'] = sweep_points
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'] = np.concatenate([
sweep_points, self.proc_data_dict['sweep_points_dict'][qbn][
'cal_points_sweep_points']])
data = self.proc_data_dict['data_to_fit'][qbn]
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
self.proc_data_dict['qscale_data'][qbn]['sweep_points_xx'] = \
sweep_points[0::3]
self.proc_data_dict['qscale_data'][qbn]['sweep_points_xy'] = \
sweep_points[1::3]
self.proc_data_dict['qscale_data'][qbn]['sweep_points_xmy'] = \
sweep_points[2::3]
self.proc_data_dict['qscale_data'][qbn]['data_xx'] = \
data[0::3]
self.proc_data_dict['qscale_data'][qbn]['data_xy'] = \
data[1::3]
self.proc_data_dict['qscale_data'][qbn]['data_xmy'] = \
data[2::3]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
for msmt_label in ['_xx', '_xy', '_xmy']:
sweep_points = self.proc_data_dict['qscale_data'][qbn][
'sweep_points' + msmt_label]
data = self.proc_data_dict['qscale_data'][qbn][
'data' + msmt_label]
                # As a workaround for a bug that crashes the analysis every
                # second time, we do not use lmfit.models.ConstantModel and
                # lmfit.models.LinearModel, but create custom models instead.
if msmt_label == '_xx':
model = lmfit.Model(lambda x, c: c)
guess_pars = model.make_params(c=np.mean(data))
else:
model = lmfit.Model(lambda x, slope, intercept:
slope * x + intercept)
slope = (data[-1] - data[0]) / \
(sweep_points[-1] - sweep_points[0])
intercept = data[-1] - slope * sweep_points[-1]
guess_pars = model.make_params(slope=slope,
intercept=intercept)
self.set_user_guess_pars(guess_pars)
key = 'fit' + msmt_label + '_' + qbn
self.fit_dicts[key] = {
'fit_fn': model.func,
'fit_xvals': {'x': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
# The best qscale parameter is the point where all 3 curves intersect.
threshold = 0.02
for qbn in self.qb_names:
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
fitparams0 = self.fit_dicts['fit_xx'+'_'+qbn]['fit_res'].params
fitparams1 = self.fit_dicts['fit_xy'+'_'+qbn]['fit_res'].params
fitparams2 = self.fit_dicts['fit_xmy'+'_'+qbn]['fit_res'].params
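            # The xy and xmy traces are linear in qscale; the optimal qscale
            # is their intersection:
            # q_opt = (b_xy - b_xmy) / (m_xmy - m_xy).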
intercept_diff_mean = fitparams1['intercept'].value - \
fitparams2['intercept'].value
slope_diff_mean = fitparams2['slope'].value - \
fitparams1['slope'].value
optimal_qscale = intercept_diff_mean/slope_diff_mean
# Warning if Xpi/2Xpi line is not within +/-threshold of 0.5
if (fitparams0['c'].value > (0.5 + threshold)) or \
(fitparams0['c'].value < (0.5 - threshold)):
log.warning('The trace from the X90-X180 pulses is '
'NOT within $\pm${} of the expected value '
'of 0.5.'.format(threshold))
# Warning if optimal_qscale is not within +/-threshold of 0.5
y_optimal_qscale = optimal_qscale * fitparams2['slope'].value + \
fitparams2['intercept'].value
if (y_optimal_qscale > (0.5 + threshold)) or \
(y_optimal_qscale < (0.5 - threshold)):
log.warning('The optimal qscale found gives a population '
'that is NOT within $\pm${} of the expected '
'value of 0.5.'.format(threshold))
# Calculate standard deviation
intercept_diff_std_squared = \
fitparams1['intercept'].stderr**2 + \
fitparams2['intercept'].stderr**2
slope_diff_std_squared = \
fitparams2['slope'].stderr**2 + fitparams1['slope'].stderr**2
optimal_qscale_stderr = np.sqrt(
intercept_diff_std_squared*(1/slope_diff_mean**2) +
slope_diff_std_squared*(intercept_diff_mean /
(slope_diff_mean**2))**2)
self.proc_data_dict['analysis_params_dict'][qbn]['qscale'] = \
optimal_qscale
self.proc_data_dict['analysis_params_dict'][qbn][
'qscale_stderr'] = optimal_qscale_stderr
def prepare_plots(self):
super().prepare_plots()
color_dict = {'_xx': '#365C91',
'_xy': '#683050',
'_xmy': '#3C7541'}
label_dict = {'_xx': r'$X_{\pi/2}X_{\pi}$',
'_xy': r'$X_{\pi/2}Y_{\pi}$',
'_xmy': r'$X_{\pi/2}Y_{-\pi}$'}
for qbn in self.qb_names:
base_plot_name = 'Qscale_' + qbn
for msmt_label in ['_xx', '_xy', '_xmy']:
sweep_points = self.proc_data_dict['qscale_data'][qbn][
'sweep_points' + msmt_label]
data = self.proc_data_dict['qscale_data'][qbn][
'data' + msmt_label]
if msmt_label == '_xx':
plot_name = base_plot_name
else:
plot_name = 'data' + msmt_label + '_' + qbn
xlabel, xunit = self.get_xaxis_label_unit(qbn)
self.plot_dicts[plot_name] = {
'plotfn': self.plot_line,
'xvals': sweep_points,
'xlabel': xlabel,
'xunit': xunit,
'yvals': data,
'ylabel': '{} state population'.format(
self.get_latex_prob_label(self.data_to_fit[qbn])),
'yunit': '',
'setlabel': 'Data\n' + label_dict[msmt_label],
'title': (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'] +
'\n' + qbn),
'linestyle': 'none',
'color': color_dict[msmt_label],
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
if msmt_label != '_xx':
self.plot_dicts[plot_name]['fig_id'] = base_plot_name
if self.do_fitting:
# plot fit
xfine = np.linspace(sweep_points[0], sweep_points[-1], 1000)
fit_key = 'fit' + msmt_label + '_' + qbn
fit_res = self.fit_dicts[fit_key]['fit_res']
yvals = fit_res.model.func(xfine, **fit_res.best_values)
if not hasattr(yvals, '__iter__'):
yvals = np.array(len(xfine)*[yvals])
self.plot_dicts[fit_key] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': xfine,
'yvals': yvals,
'marker': '',
'setlabel': 'Fit\n' + label_dict[msmt_label],
'do_legend': True,
'color': color_dict[msmt_label],
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
trans_name = 'ef' if 'f' in self.data_to_fit[qbn] else 'ge'
old_qscale_val = self.raw_data_dict[
f'{trans_name}_qscale_'+qbn]
if old_qscale_val != old_qscale_val:
old_qscale_val = 0
textstr = 'Qscale = {:.4f} $\pm$ {:.4f}'.format(
self.proc_data_dict['analysis_params_dict'][qbn][
'qscale'],
self.proc_data_dict['analysis_params_dict'][qbn][
'qscale_stderr']) + \
'\nold Qscale= {:.4f}'.format(old_qscale_val)
self.plot_dicts['text_msg_' + qbn] = {
'fig_id': base_plot_name,
'ypos': -0.175,
'xpos': 0.5,
'horizontalalignment': 'center',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
# plot cal points
if self.num_cal_points != 0:
for i, cal_pts_idxs in enumerate(
self.cal_states_dict.values()):
plot_dict_name = list(self.cal_states_dict)[i] + \
'_' + qbn
self.plot_dicts[plot_dict_name] = {
'fig_id': base_plot_name,
'plotfn': self.plot_line,
'xvals': np.mean([
self.proc_data_dict['sweep_points_dict'][qbn]
['cal_points_sweep_points'][cal_pts_idxs],
self.proc_data_dict['sweep_points_dict'][qbn]
['cal_points_sweep_points'][cal_pts_idxs]],
axis=0),
'yvals': self.proc_data_dict[
'data_to_fit'][qbn][cal_pts_idxs],
'setlabel': list(self.cal_states_dict)[i],
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'linestyle': 'none',
'line_kws': {'color': self.get_cal_state_color(
list(self.cal_states_dict)[i])}}
self.plot_dicts[plot_dict_name + '_line'] = {
'fig_id': base_plot_name,
'plotfn': self.plot_hlines,
'y': np.mean(
self.proc_data_dict[
'data_to_fit'][qbn][cal_pts_idxs]),
'xmin': self.proc_data_dict['sweep_points_dict'][
qbn]['sweep_points'][0],
'xmax': self.proc_data_dict['sweep_points_dict'][
qbn]['sweep_points'][-1],
'colors': 'gray'}
class EchoAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, *args, **kwargs):
auto = kwargs.pop('auto', True)
super().__init__(*args, auto=False, **kwargs)
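        # With an artificial detuning the echo trace oscillates and is
        # analysed like a Ramsey measurement; otherwise it is a plain
        # exponential decay and the T1 analysis (with a free offset) is
        # reused to extract T2 echo.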
if self.options_dict.get('artificial_detuning', None) is not None:
self.echo_analysis = RamseyAnalysis(*args, auto=False, **kwargs)
else:
if 'options_dict' in kwargs:
# kwargs.pop('options_dict')
kwargs['options_dict'].update({'vary_offset': True})
else:
kwargs['options_dict'] = {'vary_offset': True}
self.echo_analysis = T1Analysis(*args, auto=False, **kwargs)
if auto:
self.echo_analysis.extract_data()
self.echo_analysis.process_data()
self.echo_analysis.prepare_fitting()
self.echo_analysis.run_fitting()
self.echo_analysis.save_fit_results()
self.analyze_fit_results()
self.prepare_plots()
def analyze_fit_results(self):
self.echo_analysis.analyze_fit_results()
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
params_dict = self.echo_analysis.proc_data_dict[
'analysis_params_dict'][qbn]
if 'T1' in params_dict:
self.proc_data_dict['analysis_params_dict'][qbn][
'T2_echo'] = params_dict['T1']
self.proc_data_dict['analysis_params_dict'][qbn][
'T2_echo_stderr'] = params_dict['T1_stderr']
else:
self.proc_data_dict['analysis_params_dict'][qbn][
'T2_echo'] = params_dict['exp_decay_'+qbn][
'T2_star']
self.proc_data_dict['analysis_params_dict'][qbn][
'T2_echo_stderr'] = params_dict['exp_decay_'+qbn][
'T2_star_stderr']
def prepare_plots(self):
self.echo_analysis.prepare_plots()
for qbn in self.qb_names:
# rename base plot
figure_name = 'Echo_' + qbn
echo_plot_key_t1 = [key for key in self.echo_analysis.plot_dicts if
'T1_'+qbn in key]
echo_plot_key_ram = [key for key in self.echo_analysis.plot_dicts if
'Ramsey_'+qbn in key]
if len(echo_plot_key_t1) != 0:
echo_plot_name = echo_plot_key_t1[0]
elif len(echo_plot_key_ram) != 0:
echo_plot_name = echo_plot_key_ram[0]
else:
raise ValueError('Neither T1 nor Ramsey plots were found.')
self.echo_analysis.plot_dicts[echo_plot_name][
'legend_pos'] = 'upper right'
self.echo_analysis.plot_dicts[echo_plot_name][
'legend_bbox_to_anchor'] = (1, -0.15)
for plot_label in self.echo_analysis.plot_dicts:
if qbn in plot_label:
if 'raw' not in plot_label and 'projected' not in plot_label:
self.echo_analysis.plot_dicts[plot_label]['fig_id'] = \
figure_name
old_T2e_val = a_tools.get_instr_setting_value_from_file(
file_path=self.echo_analysis.raw_data_dict['folder'],
instr_name=qbn, param_name='T2{}'.format(
'_ef' if 'f' in self.echo_analysis.data_to_fit[qbn]
else ''))
T2_dict = self.proc_data_dict['analysis_params_dict']
textstr = '$T_2$ echo = {:.2f} $\mu$s'.format(
T2_dict[qbn]['T2_echo']*1e6) \
+ ' $\pm$ {:.2f} $\mu$s'.format(
T2_dict[qbn]['T2_echo_stderr']*1e6) \
+ '\nold $T_2$ echo = {:.2f} $\mu$s'.format(
old_T2e_val*1e6)
self.echo_analysis.plot_dicts['text_msg_' + qbn][
'text_string'] = textstr
self.echo_analysis.plot(key_list='auto')
self.echo_analysis.save_figures(close_figs=True)
class RamseyAddPulseAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, *args, **kwargs):
auto = kwargs.pop('auto', True)
super().__init__(*args, auto=False, **kwargs)
options_dict = kwargs.pop('options_dict', OrderedDict())
options_dict_no = deepcopy(options_dict)
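        # The two Ramsey traces are measured interleaved: within the
        # non-calibration segments, every second segment is the reference
        # (no additional pulse) and the other half includes the additional
        # pulse; the last 4 segments are calibration points shared by both.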
options_dict_no.update(dict(
data_filter=lambda raw: np.concatenate([
raw[:-4][1::2], raw[-4:]]),
hdf_group_name_suffix='_no_pulse'))
self.ramsey_analysis = RamseyAnalysis(
*args, auto=False, options_dict=options_dict_no,
**kwargs)
options_dict_with = deepcopy(options_dict)
options_dict_with.update(dict(
data_filter=lambda raw: np.concatenate([
raw[:-4][0::2], raw[-4:]]),
hdf_group_name_suffix='_with_pulse'))
self.ramsey_add_pulse_analysis = RamseyAnalysis(
*args, auto=False, options_dict=options_dict_with,
**kwargs)
if auto:
self.ramsey_analysis.extract_data()
self.ramsey_analysis.process_data()
self.ramsey_analysis.prepare_fitting()
self.ramsey_analysis.run_fitting()
self.ramsey_analysis.save_fit_results()
self.ramsey_add_pulse_analysis.extract_data()
self.ramsey_add_pulse_analysis.process_data()
self.ramsey_add_pulse_analysis.prepare_fitting()
self.ramsey_add_pulse_analysis.run_fitting()
self.ramsey_add_pulse_analysis.save_fit_results()
self.raw_data_dict = self.ramsey_analysis.raw_data_dict
self.analyze_fit_results()
self.prepare_plots()
keylist = []
for qbn in self.qb_names:
figure_name = 'CrossZZ_' + qbn
keylist.append(figure_name+'with')
keylist.append(figure_name+'no')
self.plot()
self.save_figures(close_figs=True)
def analyze_fit_results(self):
self.cross_kerr = 0.0
self.ramsey_analysis.analyze_fit_results()
self.ramsey_add_pulse_analysis.analyze_fit_results()
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
self.params_dict_ramsey = self.ramsey_analysis.proc_data_dict[
'analysis_params_dict'][qbn]
self.params_dict_add_pulse = \
self.ramsey_add_pulse_analysis.proc_data_dict[
'analysis_params_dict'][qbn]
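            # The residual ZZ (cross-Kerr) shift is the difference between the
            # qubit frequency extracted without and with the additional
            # pi-pulse on the other qubit.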
self.cross_kerr = self.params_dict_ramsey[
'exp_decay_'+str(qbn)]['new_qb_freq'] \
- self.params_dict_add_pulse[
'exp_decay_'+str(qbn)]['new_qb_freq']
self.cross_kerr_error = np.sqrt(
(self.params_dict_ramsey[
'exp_decay_'+str(qbn)]['new_qb_freq_stderr'])**2 +
(self.params_dict_add_pulse[
'exp_decay_' + str(qbn)]['new_qb_freq_stderr'])**2)
def prepare_plots(self):
self.ramsey_analysis.prepare_plots()
self.ramsey_add_pulse_analysis.prepare_plots()
self.ramsey_analysis.plot(key_list='auto')
self.ramsey_analysis.save_figures(close_figs=True, savebase='Ramsey_no')
self.ramsey_add_pulse_analysis.plot(key_list='auto')
self.ramsey_add_pulse_analysis.save_figures(close_figs=True,
savebase='Ramsey_with')
self.options_dict['plot_proj_data'] = False
self.metadata = {'plot_proj_data': False, 'plot_raw_data': False}
super().prepare_plots()
try:
xunit = self.metadata["sweep_unit"]
xlabel = self.metadata["sweep_name"]
except KeyError:
xlabel = self.raw_data_dict['sweep_parameter_names'][0]
xunit = self.raw_data_dict['sweep_parameter_units'][0]
if np.ndim(xunit) > 0:
xunit = xunit[0]
title = (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'])
for qbn in self.qb_names:
data_no = self.ramsey_analysis.proc_data_dict['data_to_fit'][
qbn][:-self.ramsey_analysis.num_cal_points]
data_with = self.ramsey_add_pulse_analysis.proc_data_dict[
'data_to_fit'][
qbn][:-self.ramsey_analysis.num_cal_points]
delays = self.ramsey_analysis.proc_data_dict['sweep_points_dict'][
qbn]['sweep_points'][
:-self.ramsey_analysis.num_cal_points]
figure_name = 'CrossZZ_' + qbn
self.plot_dicts[figure_name+'with'] = {
'fig_id': figure_name,
'plotfn': self.plot_line,
'xvals': delays,
'yvals': data_with,
'xlabel': xlabel,
'xunit': xunit,
'ylabel': '|e> state population',
'setlabel': 'with $\\pi$-pulse',
'title': title,
'color': 'r',
'marker': 'o',
'line_kws': {'markersize': 5},
'linestyle': 'none',
'do_legend': True,
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
if self.do_fitting:
fit_res_with = self.ramsey_add_pulse_analysis.fit_dicts[
'exp_decay_' + qbn]['fit_res']
self.plot_dicts['fit_with_'+qbn] = {
'fig_id': figure_name,
'plotfn': self.plot_fit,
'xlabel': 'Ramsey delay',
'xunit': 's',
'fit_res': fit_res_with,
'setlabel': 'with $\\pi$-pulse - fit',
'title': title,
'do_legend': True,
'color': 'r',
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
self.plot_dicts[figure_name+'no'] = {
'fig_id': figure_name,
'plotfn': self.plot_line,
'xvals': delays,
'yvals': data_no,
'setlabel': 'no $\\pi$-pulse',
'title': title,
'color': 'g',
'marker': 'o',
'line_kws': {'markersize': 5},
'linestyle': 'none',
'do_legend': True,
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
if self.do_fitting:
fit_res_no = self.ramsey_analysis.fit_dicts[
'exp_decay_' + qbn]['fit_res']
self.plot_dicts['fit_no_'+qbn] = {
'fig_id': figure_name,
'plotfn': self.plot_fit,
'xlabel': 'Ramsey delay',
'xunit': 's',
'fit_res': fit_res_no,
'setlabel': 'no $\\pi$-pulse - fit',
'title': title,
'do_legend': True,
'color': 'g',
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
textstr = r'$\alpha ZZ$ = {:.2f} +- {:.2f}'.format(
self.cross_kerr*1e-3, self.cross_kerr_error*1e-3) + ' kHz'
self.plot_dicts['text_msg_' + qbn] = {'fig_id': figure_name,
'text_string': textstr,
'ypos': -0.2,
'xpos': -0.075,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text}
class OverUnderRotationAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
data = self.proc_data_dict['projected_data_dict'][qbn]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
model = lmfit.models.LinearModel()
guess_pars = model.guess(data=data, x=sweep_points)
guess_pars['intercept'].value = 0.5
guess_pars['intercept'].vary = False
key = 'fit_' + qbn
self.fit_dicts[key] = {
'fit_fn': model.func,
'fit_xvals': {'x': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
try:
old_amp180 = a_tools.get_instr_setting_value_from_file(
file_path=self.raw_data_dict['folder'][0],
instr_name=qbn, param_name='amp180{}'.format(
'_ef' if 'f' in self.data_to_fit[qbn] else ''))
except KeyError:
old_amp180 = a_tools.get_instr_setting_value_from_file(
file_path=self.raw_data_dict['folder'][0],
instr_name=qbn, param_name='{}_amp180'.format(
'ef' if 'f' in self.data_to_fit[qbn] else 'ge'))
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
self.proc_data_dict['analysis_params_dict'][qbn][
'corrected_amp'] = old_amp180 - self.fit_dicts[
'fit_' + qbn]['fit_res'].best_values['slope']*old_amp180
self.proc_data_dict['analysis_params_dict'][qbn][
'corrected_amp_stderr'] = self.fit_dicts[
'fit_' + qbn]['fit_res'].params['slope'].stderr*old_amp180
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
for qbn in self.qb_names:
# rename base plot
if self.fit_dicts['fit_' + qbn][
'fit_res'].best_values['slope'] >= 0:
base_plot_name = 'OverRotation_' + qbn
else:
base_plot_name = 'UnderRotation_' + qbn
self.prepare_projected_data_plot(
fig_name=base_plot_name,
data=self.proc_data_dict['data_to_fit'][qbn],
plot_name_suffix=qbn+'fit',
qb_name=qbn)
self.plot_dicts['fit_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['fit_' + qbn]['fit_res'],
'setlabel': 'linear fit',
'do_legend': True,
'color': 'r',
'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'}
try:
old_amp180 = a_tools.get_instr_setting_value_from_file(
file_path=self.raw_data_dict['folder'][0],
instr_name=qbn, param_name='amp180{}'.format(
'_ef' if 'f' in self.data_to_fit[qbn] else ''))
except KeyError:
old_amp180 = a_tools.get_instr_setting_value_from_file(
file_path=self.raw_data_dict['folder'][0],
instr_name=qbn, param_name='{}_amp180'.format(
'ef' if 'f' in self.data_to_fit[qbn] else 'ge'))
correction_dict = self.proc_data_dict['analysis_params_dict']
fit_res = self.fit_dicts['fit_' + qbn]['fit_res']
textstr = '$\pi$-Amp = {:.4f} mV'.format(
correction_dict[qbn]['corrected_amp']*1e3) \
+ ' $\pm$ {:.1e} mV'.format(
correction_dict[qbn]['corrected_amp_stderr']*1e3) \
+ '\nold $\pi$-Amp = {:.4f} mV'.format(
old_amp180*1e3) \
+ '\namp. correction = {:.4f} mV'.format(
fit_res.best_values['slope']*old_amp180*1e3) \
+ '\nintercept = {:.2f}'.format(
fit_res.best_values['intercept'])
self.plot_dicts['text_msg_' + qbn] = {
'fig_id': base_plot_name,
'ypos': -0.2,
'xpos': 0,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
self.plot_dicts['half_hline_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_hlines,
'y': 0.5,
'xmin': self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][0],
'xmax': self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][-1],
'colors': 'gray'}
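# Illustrative sketch (the helper name below is an assumption, not part of the
# original analysis flow): the correction applied in
# OverUnderRotationAnalysis.analyze_fit_results() above amounts to
# new_amp = old_amp * (1 - slope), with uncertainty slope_stderr * old_amp.
def _example_amp180_correction(old_amp180, slope, slope_stderr):
    corrected_amp = old_amp180 - slope * old_amp180
    corrected_amp_stderr = slope_stderr * old_amp180
    return corrected_amp, corrected_amp_stderr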
class MultiCZgate_Calib_Analysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, *args, **kwargs):
options_dict = kwargs.pop('options_dict', {})
options_dict.update({'TwoD': True})
kwargs.update({'options_dict': options_dict})
self.phase_key = 'phase_diffs'
self.legend_label_func = lambda qbn, row: ''
super().__init__(*args, **kwargs)
def process_data(self):
super().process_data()
# Find leakage and ramsey qubit names
self.leakage_qbnames = self.get_param_value('leakage_qbnames',
default_value=[])
self.ramsey_qbnames = self.get_param_value('ramsey_qbnames',
default_value=[])
self.gates_list = self.get_param_value('gates_list', default_value=[])
if not len(self.gates_list):
leakage_qbnames_temp = len(self.ramsey_qbnames) * ['']
self.gates_list = [(qbl, qbr) for qbl, qbr in
zip(leakage_qbnames_temp, self.ramsey_qbnames)]
# TODO: Steph 15.09.2020
# This is a hack. It should be done in MultiQubit_TimeDomain_Analysis
        # but that would break every analysis inheriting from it; we just
        # needed it to work for this analysis :)
self.data_to_fit = self.get_param_value('data_to_fit', {})
for qbn in self.data_to_fit:
# make values of data_to_fit be lists
if isinstance(self.data_to_fit[qbn], str):
self.data_to_fit[qbn] = [self.data_to_fit[qbn]]
# Overwrite data_to_fit in proc_data_dict
self.proc_data_dict['data_to_fit'] = OrderedDict()
for qbn, prob_data in self.proc_data_dict[
'projected_data_dict'].items():
if qbn in self.data_to_fit:
self.proc_data_dict['data_to_fit'][qbn] = {
prob_label: prob_data[prob_label] for prob_label in
self.data_to_fit[qbn]}
# Make sure data has the right shape (len(hard_sp), len(soft_sp))
for qbn, prob_data in self.proc_data_dict['data_to_fit'].items():
for prob_label, data in prob_data.items():
if data.shape[1] != self.proc_data_dict[
'sweep_points_dict'][qbn]['sweep_points'].size:
self.proc_data_dict['data_to_fit'][qbn][prob_label] = data.T
# reshape data for ease of use
qbn = self.qb_names[0]
phase_sp_param_name = [p for p in self.mospm[qbn] if 'phase' in p][0]
phases = self.sp.get_sweep_params_property('values', 0,
phase_sp_param_name)
self.dim_scale_factor = len(phases) // len(np.unique(phases))
self.proc_data_dict['data_to_fit_reshaped'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['data_to_fit_reshaped'][qbn] = {
prob_label: np.reshape(
self.proc_data_dict['data_to_fit'][qbn][prob_label][
:, :-self.num_cal_points],
(self.dim_scale_factor * \
self.proc_data_dict['data_to_fit'][qbn][prob_label][
:, :-self.num_cal_points].shape[0],
self.proc_data_dict['data_to_fit'][qbn][prob_label][
:, :-self.num_cal_points].shape[1]//self.dim_scale_factor))
for prob_label in self.proc_data_dict['data_to_fit'][qbn]}
# convert phases to radians
for qbn in self.qb_names:
sweep_dict = self.proc_data_dict['sweep_points_dict'][qbn]
sweep_dict['sweep_points'] *= np.pi/180
def plot_traces(self, prob_label, data_2d, qbn):
plotsize = self.get_default_plot_params(set=False)[
'figure.figsize']
plotsize = (plotsize[0], plotsize[0]/1.25)
if data_2d.shape[1] != self.proc_data_dict[
'sweep_points_dict'][qbn]['sweep_points'].size:
data_2d = data_2d.T
data_2d_reshaped = np.reshape(
data_2d[:, :-self.num_cal_points],
(self.dim_scale_factor*data_2d[:, :-self.num_cal_points].shape[0],
data_2d[:, :-self.num_cal_points].shape[1]//self.dim_scale_factor))
data_2d_cal_reshaped = [[data_2d[:, -self.num_cal_points:]]] * \
(self.dim_scale_factor *
data_2d[:, :-self.num_cal_points].shape[0])
ref_states_plot_dicts = {}
for row in range(data_2d_reshaped.shape[0]):
phases = np.unique(self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'])
data = data_2d_reshaped[row, :]
legend_bbox_to_anchor = (1, -0.15)
legend_pos = 'upper right'
legend_ncol = 2
if qbn in self.ramsey_qbnames and self.get_latex_prob_label(
prob_label) in [self.get_latex_prob_label(pl)
for pl in self.data_to_fit[qbn]]:
figure_name = '{}_{}_{}'.format(self.phase_key, qbn, prob_label)
elif qbn in self.leakage_qbnames and self.get_latex_prob_label(
prob_label) in [self.get_latex_prob_label(pl)
for pl in self.data_to_fit[qbn]]:
figure_name = 'Leakage_{}_{}'.format(qbn, prob_label)
else:
figure_name = 'projected_plot_' + qbn + '_' + \
prob_label
# plot cal points
if self.num_cal_points > 0:
data_w_cal = data_2d_cal_reshaped[row][0][0]
for i, cal_pts_idxs in enumerate(
self.cal_states_dict.values()):
s = '{}_{}_{}'.format(row, qbn, prob_label)
ref_state_plot_name = list(
self.cal_states_dict)[i] + '_' + s
ref_states_plot_dicts[ref_state_plot_name] = {
'fig_id': figure_name,
'plotfn': self.plot_line,
'plotsize': plotsize,
'xvals': self.proc_data_dict[
'sweep_points_dict'][qbn][
'cal_points_sweep_points'][
cal_pts_idxs],
'yvals': data_w_cal[cal_pts_idxs],
'setlabel': list(
self.cal_states_dict)[i] if
row == 0 else '',
'do_legend': row == 0,
'legend_bbox_to_anchor':
legend_bbox_to_anchor,
'legend_pos': legend_pos,
'legend_ncol': legend_ncol,
'linestyle': 'none',
'line_kws': {'color':
self.get_cal_state_color(
list(self.cal_states_dict)[i])}}
xlabel, xunit = self.get_xaxis_label_unit(qbn)
self.plot_dicts['data_{}_{}_{}'.format(
row, qbn, prob_label)] = {
'plotfn': self.plot_line,
'fig_id': figure_name,
'plotsize': plotsize,
'xvals': phases,
'xlabel': xlabel,
'xunit': xunit,
'yvals': data,
'ylabel': '{} state population'.format(
self.get_latex_prob_label(prob_label)),
'yunit': '',
'yscale': self.get_param_value("yscale", "linear"),
'setlabel': 'Data - ' + self.legend_label_func(qbn, row)
if row in [0, 1] else '',
'title': self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'] + '-' + qbn,
'linestyle': 'none',
'color': 'C0' if row % 2 == 0 else 'C2',
'do_legend': row in [0, 1],
'legend_ncol': legend_ncol,
'legend_bbox_to_anchor': legend_bbox_to_anchor,
'legend_pos': legend_pos}
if self.do_fitting and 'projected' not in figure_name:
if qbn in self.leakage_qbnames and self.get_param_value(
'classified_ro', False):
continue
k = 'fit_{}{}_{}_{}'.format(
'on' if row % 2 == 0 else 'off', row, prob_label, qbn)
if f'Cos_{k}' in self.fit_dicts:
fit_res = self.fit_dicts[f'Cos_{k}']['fit_res']
self.plot_dicts[k + '_' + prob_label] = {
'fig_id': figure_name,
'plotfn': self.plot_fit,
'fit_res': fit_res,
'setlabel': 'Fit - ' + self.legend_label_func(qbn, row)
if row in [0, 1] else '',
'color': 'C0' if row % 2 == 0 else 'C2',
'do_legend': row in [0, 1],
'legend_ncol': legend_ncol,
'legend_bbox_to_anchor':
legend_bbox_to_anchor,
'legend_pos': legend_pos}
elif f'Linear_{k}' in self.fit_dicts:
fit_res = self.fit_dicts[f'Linear_{k}']['fit_res']
xvals = fit_res.userkws[
fit_res.model.independent_vars[0]]
xfine = np.linspace(min(xvals), max(xvals), 100)
yvals = fit_res.model.func(
xfine, **fit_res.best_values)
if not hasattr(yvals, '__iter__'):
yvals = np.array(len(xfine)*[yvals])
self.plot_dicts[k] = {
'fig_id': figure_name,
'plotfn': self.plot_line,
'xvals': xfine,
'yvals': yvals,
'marker': '',
'setlabel': 'Fit - ' + self.legend_label_func(
qbn, row) if row in [0, 1] else '',
'do_legend': row in [0, 1],
'legend_ncol': legend_ncol,
'color': 'C0' if row % 2 == 0 else 'C2',
'legend_bbox_to_anchor':
legend_bbox_to_anchor,
'legend_pos': legend_pos}
# ref state plots need to be added at the end, otherwise the
# legend for |g> and |e> is added twice (because of the
        # condition do_legend = (row in [0,1]) in the plot dicts above)
if self.num_cal_points > 0:
self.plot_dicts.update(ref_states_plot_dicts)
return figure_name
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
self.leakage_values = np.array([])
labels = ['on', 'off']
for i, qbn in enumerate(self.qb_names):
for prob_label in self.data_to_fit[qbn]:
for row in range(self.proc_data_dict['data_to_fit_reshaped'][
qbn][prob_label].shape[0]):
phases = np.unique(self.proc_data_dict['sweep_points_dict'][
qbn]['msmt_sweep_points'])
data = self.proc_data_dict['data_to_fit_reshaped'][qbn][
prob_label][row, :]
key = 'fit_{}{}_{}_{}'.format(labels[row % 2], row,
prob_label, qbn)
if qbn in self.leakage_qbnames and prob_label == 'pf':
if self.get_param_value('classified_ro', False):
self.leakage_values = np.append(self.leakage_values,
np.mean(data))
else:
# fit leakage qb results to a constant
model = lmfit.models.ConstantModel()
guess_pars = model.guess(data=data, x=phases)
self.fit_dicts[f'Linear_{key}'] = {
'fit_fn': model.func,
'fit_xvals': {'x': phases},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
elif prob_label == 'pe' or prob_label == 'pg':
# fit ramsey qb results to a cosine
model = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=model,
t=phases,
data=data, freq_guess=1/(2*np.pi))
guess_pars['frequency'].value = 1/(2*np.pi)
guess_pars['frequency'].vary = False
self.fit_dicts[f'Cos_{key}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': phases},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
# Cos fits
keys = [k for k in list(self.fit_dicts.keys()) if
(k.startswith('Cos') and k.endswith(qbn))]
if len(keys) > 0:
fit_res_objs = [self.fit_dicts[k]['fit_res'] for k in keys]
# cosine amplitudes
amps = np.array([fr.best_values['amplitude'] for fr
in fit_res_objs])
amps_errs = np.array([fr.params['amplitude'].stderr
for fr in fit_res_objs], dtype=np.float64)
amps_errs = np.nan_to_num(amps_errs)
# amps_errs.dtype = amps.dtype
if qbn in self.ramsey_qbnames:
# phase_diffs
phases = np.array([fr.best_values['phase'] for fr in
fit_res_objs])
phases_errs = np.array([fr.params['phase'].stderr for fr in
fit_res_objs], dtype=np.float64)
phases_errs = np.nan_to_num(phases_errs)
self.proc_data_dict['analysis_params_dict'][
f'phases_{qbn}'] = {
'val': phases, 'stderr': phases_errs}
# compute phase diffs
if getattr(self, 'delta_tau', 0) is not None:
                        # this can be False for Cryoscope with
# estimation_window == None and odd nr of trunc lengths
phase_diffs = phases[0::2] - phases[1::2]
phase_diffs %= (2*np.pi)
phase_diffs_stderrs = np.sqrt(
np.array(phases_errs[0::2]**2 +
phases_errs[1::2]**2, dtype=np.float64))
self.proc_data_dict['analysis_params_dict'][
f'{self.phase_key}_{qbn}'] = {
'val': phase_diffs, 'stderr': phase_diffs_stderrs}
# population_loss = (cos_amp_g - cos_amp_e)/ cos_amp_g
population_loss = (amps[1::2] - amps[0::2])/amps[1::2]
x = amps[1::2] - amps[0::2]
x_err = np.array(amps_errs[0::2]**2 + amps_errs[1::2]**2,
dtype=np.float64)
y = amps[1::2]
y_err = amps_errs[1::2]
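                    # error on the ratio x/y from first-order propagation:
                    # stderr = sqrt(((y*x_err)**2 + (x*y_err)**2) / y**4)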
try:
population_loss_stderrs = np.sqrt(np.array(
((y * x_err) ** 2 + (x * y_err) ** 2) / (y ** 4),
dtype=np.float64))
except:
population_loss_stderrs = float("nan")
self.proc_data_dict['analysis_params_dict'][
f'population_loss_{qbn}'] = \
{'val': population_loss,
'stderr': population_loss_stderrs}
else:
self.proc_data_dict['analysis_params_dict'][
f'amps_{qbn}'] = {
'val': amps[1::2], 'stderr': amps_errs[1::2]}
# Linear fits
keys = [k for k in list(self.fit_dicts.keys()) if
(k.startswith('Linear') and k.endswith(qbn))]
if len(keys) > 0:
fit_res_objs = [self.fit_dicts[k]['fit_res'] for k in keys]
# get leakage
lines = np.array([fr.best_values['c'] for fr
in fit_res_objs])
lines_errs = np.array([fr.params['c'].stderr for
fr in fit_res_objs], dtype=np.float64)
lines_errs = np.nan_to_num(lines_errs)
leakage = lines[0::2]
leakage_errs = np.array(lines_errs[0::2], dtype=np.float64)
leakage_increase = lines[0::2] - lines[1::2]
                leakage_increase_errs = np.array(np.sqrt(lines_errs[0::2]**2 +
lines_errs[1::2]**2),
dtype=np.float64)
self.proc_data_dict['analysis_params_dict'][
f'leakage_{qbn}'] = \
{'val': leakage, 'stderr': leakage_errs}
self.proc_data_dict['analysis_params_dict'][
f'leakage_increase_{qbn}'] = {'val': leakage_increase,
'stderr': leakage_increase_errs}
# special case: if classified detector was used, we get leakage
# for free
if qbn in self.leakage_qbnames and self.get_param_value(
'classified_ro', False):
leakage = self.leakage_values[0::2]
leakage_errs = np.zeros(len(leakage))
leakage_increase = self.leakage_values[0::2] - \
self.leakage_values[1::2]
leakage_increase_errs = np.zeros(len(leakage))
self.proc_data_dict['analysis_params_dict'][
f'leakage_{qbn}'] = \
{'val': leakage, 'stderr': leakage_errs}
self.proc_data_dict['analysis_params_dict'][
f'leakage_increase_{qbn}'] = {'val': leakage_increase,
'stderr': leakage_increase_errs}
self.save_processed_data(key='analysis_params_dict')
def prepare_plots(self):
len_ssp = len(self.proc_data_dict['analysis_params_dict'][
f'{self.phase_key}_{self.ramsey_qbnames[0]}']['val'])
if self.options_dict.get('plot_all_traces', True):
for j, qbn in enumerate(self.qb_names):
if self.options_dict.get('plot_all_probs', True):
for prob_label, data_2d in self.proc_data_dict[
'projected_data_dict'][qbn].items():
figure_name = self.plot_traces(prob_label, data_2d, qbn)
else:
for prob_label, data_2d in self.proc_data_dict[
                        'data_to_fit'][qbn].items():
figure_name = self.plot_traces(prob_label, data_2d, qbn)
if self.do_fitting and len_ssp == 1:
self.options_dict.update({'TwoD': False,
'plot_proj_data': False})
super().prepare_plots()
if qbn in self.ramsey_qbnames:
# add the cphase + leakage textboxes to the
# cphase_qbr_pe figure
figure_name = f'{self.phase_key}_{qbn}_pe'
textstr = '{} = \n{:.2f}'.format(
self.phase_key,
self.proc_data_dict['analysis_params_dict'][
f'{self.phase_key}_{qbn}']['val'][0]*180/np.pi) + \
r'$^{\circ}$' + \
'$\\pm${:.2f}'.format(
self.proc_data_dict[
'analysis_params_dict'][
f'{self.phase_key}_{qbn}'][
'stderr'][0] * 180 / np.pi) + \
r'$^{\circ}$'
textstr += '\n\nContrast loss = \n' + \
'{:.3f} $\\pm$ {:.3f}'.format(
self.proc_data_dict[
'analysis_params_dict'][
f'population_loss_{qbn}']['val'][0],
self.proc_data_dict[
'analysis_params_dict'][
f'population_loss_{qbn}'][
'stderr'][0])
self.plot_dicts['cphase_text_msg_' + qbn] = {
'fig_id': figure_name,
'ypos': -0.2,
'xpos': -0.1,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'box_props': None,
'plotfn': self.plot_text,
'text_string': textstr}
qbl = [gl[0] for gl in self.gates_list
if qbn == gl[1]]
if len(qbl):
qbl = qbl[0]
textstr = 'Leakage =\n{:.5f} $\\pm$ {:.5f}'.format(
self.proc_data_dict['analysis_params_dict'][
f'leakage_{qbl}']['val'][0],
self.proc_data_dict['analysis_params_dict'][
f'leakage_{qbl}']['stderr'][0])
textstr += '\n\n$\\Delta$Leakage = \n' \
'{:.5f} $\\pm$ {:.5f}'.format(
self.proc_data_dict['analysis_params_dict'][
f'leakage_increase_{qbl}']['val'][0],
self.proc_data_dict['analysis_params_dict'][
f'leakage_increase_{qbl}']['stderr'][0])
self.plot_dicts['cphase_text_msg_' + qbl] = {
'fig_id': figure_name,
'ypos': -0.2,
'xpos': 0.175,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'box_props': None,
'plotfn': self.plot_text,
'text_string': textstr}
else:
if f'amps_{qbn}' in self.proc_data_dict[
'analysis_params_dict']:
figure_name = f'Leakage_{qbn}_pg'
textstr = 'Amplitude CZ int. OFF = \n' + \
'{:.3f} $\\pm$ {:.3f}'.format(
self.proc_data_dict[
'analysis_params_dict'][
f'amps_{qbn}']['val'][0],
self.proc_data_dict[
'analysis_params_dict'][
f'amps_{qbn}']['stderr'][0])
self.plot_dicts['swap_text_msg_' + qbn] = {
'fig_id': figure_name,
'ypos': -0.2,
'xpos': -0.1,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'box_props': None,
'plotfn': self.plot_text,
'text_string': textstr}
# plot analysis results
if self.do_fitting and len_ssp > 1:
for qbn in self.qb_names:
ss_pars = self.proc_data_dict['sweep_points_2D_dict'][qbn]
for idx, ss_pname in enumerate(ss_pars):
xvals = self.sp.get_sweep_params_property('values', 1,
ss_pname)
xvals_to_use = deepcopy(xvals)
xlabel = self.sp.get_sweep_params_property('label', 1,
ss_pname)
xunit = self.sp.get_sweep_params_property('unit', 1,
ss_pname)
for param_name, results_dict in self.proc_data_dict[
'analysis_params_dict'].items():
if qbn in param_name:
reps = 1
if len(results_dict['val']) >= len(xvals):
reps = len(results_dict['val']) / len(xvals)
else:
                            # Cryoscope case
if hasattr(self, 'xvals_reduction_func'):
xvals_to_use = self.xvals_reduction_func(
xvals)
else:
log.warning(f'Length mismatch between xvals'
' and analysis param for'
                                            f' {param_name}, and no'
' xvals_reduction_func has been'
' defined. Unclear how to'
' reduce xvals.')
plot_name = f'{param_name}_vs_{xlabel}'
if 'phase' in param_name:
yvals = results_dict['val']*180/np.pi - (180 if
len(self.leakage_qbnames) > 0 else 0)
yerr = results_dict['stderr']*180/np.pi
ylabel = param_name + ('-$180^{\\circ}$' if
len(self.leakage_qbnames) > 0 else '')
self.plot_dicts[plot_name+'_hline'] = {
'fig_id': plot_name,
'plotfn': self.plot_hlines,
'y': 0,
'xmin': np.min(xvals_to_use),
'xmax': np.max(xvals_to_use),
'colors': 'gray'}
else:
yvals = results_dict['val']
yerr = results_dict['stderr']
ylabel = param_name
if 'phase' in param_name:
yunit = 'deg'
elif 'freq' in param_name:
yunit = 'Hz'
else:
yunit = ''
self.plot_dicts[plot_name] = {
'plotfn': self.plot_line,
'xvals': np.repeat(xvals_to_use, reps),
'xlabel': xlabel,
'xunit': xunit,
'yvals': yvals,
'yerr': yerr if param_name != 'leakage'
else None,
'ylabel': ylabel,
'yunit': yunit,
'title': self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring']
+ '-' + qbn,
'linestyle': 'none',
'do_legend': False}
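# Illustrative sketch (the helper name and plain-float inputs are assumptions,
# not part of the analysis flow above): per pair of 'on'/'off' rows,
# MultiCZgate_Calib_Analysis.analyze_fit_results() extracts the conditional
# phase from the fitted cosine phases and the contrast loss from the fitted
# cosine amplitudes, as in
def _example_cphase_and_contrast_loss(phase_on, phase_off, amp_on, amp_off):
    cphase = (phase_on - phase_off) % (2 * np.pi)  # rad
    contrast_loss = (amp_off - amp_on) / amp_off
    return cphase, contrast_loss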
class CPhaseLeakageAnalysis(MultiCZgate_Calib_Analysis):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def process_data(self):
super().process_data()
# Find leakage and ramsey qubit names
# first try the legacy code
leakage_qbname = self.get_param_value('leakage_qbname')
ramsey_qbname = self.get_param_value('ramsey_qbname')
if leakage_qbname is not None and ramsey_qbname is not None:
self.gates_list += [(leakage_qbname, ramsey_qbname)]
self.leakage_qbnames = [leakage_qbname]
self.ramsey_qbnames = [ramsey_qbname]
else:
# new measurement framework
task_list = self.get_param_value('task_list', default_value=[])
for task in task_list:
self.gates_list += [(task['qbl'], task['qbr'])]
self.leakage_qbnames += [task['qbl']]
self.ramsey_qbnames += [task['qbr']]
if len(self.leakage_qbnames) == 0 and len(self.ramsey_qbnames) == 0:
raise ValueError('Please provide either leakage_qbnames or '
'ramsey_qbnames.')
elif len(self.ramsey_qbnames) == 0:
self.ramsey_qbnames = [qbn for qbn in self.qb_names if
qbn not in self.leakage_qbnames]
elif len(self.leakage_qbnames) == 0:
self.leakage_qbnames = [qbn for qbn in self.qb_names if
qbn not in self.ramsey_qbnames]
if len(self.leakage_qbnames) == 0:
self.leakage_qbnames = None
self.phase_key = 'cphase'
if len(self.leakage_qbnames) > 0:
def legend_label_func(qbn, row, gates_list=self.gates_list):
leakage_qbnames = [qb_tup[0] for qb_tup in gates_list]
if qbn in leakage_qbnames:
return f'{qbn} in $|g\\rangle$' if row % 2 != 0 else \
f'{qbn} in $|e\\rangle$'
else:
qbln = [qb_tup for qb_tup in gates_list
if qbn == qb_tup[1]][0][0]
return f'{qbln} in $|g\\rangle$' if row % 2 != 0 else \
f'{qbln} in $|e\\rangle$'
else:
legend_label_func = lambda qbn, row: \
'qbc in $|g\\rangle$' if row % 2 != 0 else \
'qbc in $|e\\rangle$'
self.legend_label_func = legend_label_func
class DynamicPhaseAnalysis(MultiCZgate_Calib_Analysis):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def process_data(self):
super().process_data()
if len(self.ramsey_qbnames) == 0:
self.ramsey_qbnames = self.qb_names
self.phase_key = 'dynamic_phase'
self.legend_label_func = lambda qbn, row: 'no FP' \
if row % 2 != 0 else 'with FP'
class CryoscopeAnalysis(DynamicPhaseAnalysis):
def __init__(self, qb_names, *args, **kwargs):
options_dict = kwargs.get('options_dict', {})
unwrap_phases = options_dict.pop('unwrap_phases', True)
options_dict['unwrap_phases'] = unwrap_phases
kwargs['options_dict'] = options_dict
params_dict = {}
for qbn in qb_names:
s = f'Instrument settings.{qbn}'
params_dict[f'ge_freq_{qbn}'] = s+f'.ge_freq'
kwargs['params_dict'] = params_dict
kwargs['numeric_params'] = list(params_dict)
super().__init__(qb_names, *args, **kwargs)
def process_data(self):
super().process_data()
self.phase_key = 'delta_phase'
def analyze_fit_results(self):
global_delta_tau = self.get_param_value('estimation_window')
task_list = self.get_param_value('task_list')
for qbn in self.qb_names:
delta_tau = deepcopy(global_delta_tau)
if delta_tau is None:
if task_list is None:
log.warning(f'estimation_window is None and task_list '
f'for {qbn} was not found. Assuming no '
f'estimation_window was used.')
else:
task = [t for t in task_list if t['qb'] == qbn]
if not len(task):
raise ValueError(f'{qbn} not found in task_list.')
delta_tau = task[0].get('estimation_window', None)
self.delta_tau = delta_tau
if self.get_param_value('analyze_fit_results_super', True):
super().analyze_fit_results()
self.proc_data_dict['tvals'] = OrderedDict()
for qbn in self.qb_names:
if delta_tau is None:
trunc_lengths = self.sp.get_sweep_params_property(
'values', 1, f'{qbn}_truncation_length')
delta_tau = np.diff(trunc_lengths)
m = delta_tau > 0
delta_tau = delta_tau[m]
phases = self.proc_data_dict['analysis_params_dict'][
f'phases_{qbn}']
delta_phases_vals = -np.diff(phases['val'])[m]
delta_phases_vals = (delta_phases_vals + np.pi) % (
2 * np.pi) - np.pi
delta_phases_errs = (np.sqrt(
np.array(phases['stderr'][1:] ** 2 +
phases['stderr'][:-1] ** 2, dtype=np.float64)))[m]
self.xvals_reduction_func = lambda xvals: \
((xvals[1:] + xvals[:-1]) / 2)[m]
self.proc_data_dict['analysis_params_dict'][
f'{self.phase_key}_{qbn}'] = {
'val': delta_phases_vals, 'stderr': delta_phases_errs}
# remove the entries in analysis_params_dict that are not
# relevant for Cryoscope (pop_loss), since
# these will cause a problem with plotting in this case.
self.proc_data_dict['analysis_params_dict'].pop(
f'population_loss_{qbn}', None)
else:
delta_phases = self.proc_data_dict['analysis_params_dict'][
f'{self.phase_key}_{qbn}']
delta_phases_vals = delta_phases['val']
delta_phases_errs = delta_phases['stderr']
if self.get_param_value('unwrap_phases', False):
if hasattr(delta_tau, '__iter__'):
# unwrap in frequency such that we don't jump more than half
# the nyquist band at any step
df = []
prev_df = 0
for dp, dt in zip(delta_phases_vals, delta_tau):
df.append(dp / (2 * np.pi * dt))
df[-1] += np.round((prev_df - df[-1]) * dt) / dt
prev_df = df[-1]
delta_phases_vals = np.array(df)*(2*np.pi*delta_tau)
else:
delta_phases_vals = np.unwrap((delta_phases_vals + np.pi) %
(2*np.pi) - np.pi)
self.proc_data_dict['analysis_params_dict'][
f'{self.phase_key}_{qbn}']['val'] = delta_phases_vals
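            # convert the phase acquired over the estimation window into a
            # frequency shift: delta_f = delta_phi / (2*pi*delta_tau)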
delta_freqs = delta_phases_vals/2/np.pi/delta_tau
delta_freqs_errs = delta_phases_errs/2/np.pi/delta_tau
self.proc_data_dict['analysis_params_dict'][f'delta_freq_{qbn}'] = \
{'val': delta_freqs, 'stderr': delta_freqs_errs}
qb_freqs = self.raw_data_dict[f'ge_freq_{qbn}'] + delta_freqs
self.proc_data_dict['analysis_params_dict'][f'freq_{qbn}'] = \
{'val': qb_freqs, 'stderr': delta_freqs_errs}
if hasattr(self, 'xvals_reduction_func') and \
self.xvals_reduction_func is not None:
self.proc_data_dict['tvals'][f'{qbn}'] = \
self.xvals_reduction_func(
self.proc_data_dict['sweep_points_2D_dict'][qbn][
f'{qbn}_truncation_length'])
else:
self.proc_data_dict['tvals'][f'{qbn}'] = \
self.proc_data_dict['sweep_points_2D_dict'][qbn][
f'{qbn}_truncation_length']
self.save_processed_data(key='analysis_params_dict')
self.save_processed_data(key='tvals')
def get_generated_and_measured_pulse(self, qbn=None):
"""
Args:
qbn: specifies for which qubit to calculate the quantities for.
Defaults to the first qubit in qb_names.
Returns: A tuple (tvals_gen, volts_gen, tvals_meas, freqs_meas,
freq_errs_meas, volt_freq_conv)
tvals_gen: time values for the generated fluxpulse
volts_gen: voltages of the generated fluxpulse
tvals_meas: time-values for the measured qubit frequencies
freqs_meas: measured qubit frequencies
freq_errs_meas: errors of measured qubit frequencies
volt_freq_conv: dictionary of fit params for frequency-voltage
conversion
"""
if qbn is None:
qbn = self.qb_names[0]
tvals_meas = self.proc_data_dict['tvals'][qbn]
freqs_meas = self.proc_data_dict['analysis_params_dict'][
f'freq_{qbn}']['val']
freq_errs_meas = self.proc_data_dict['analysis_params_dict'][
f'freq_{qbn}']['stderr']
tvals_gen, volts_gen, volt_freq_conv = self.get_generated_pulse(qbn)
return tvals_gen, volts_gen, tvals_meas, freqs_meas, freq_errs_meas, \
volt_freq_conv
def get_generated_pulse(self, qbn=None, tvals_gen=None, pulse_params=None):
"""
Args:
qbn: specifies for which qubit to calculate the quantities for.
Defaults to the first qubit in qb_names.
        Returns: A tuple (tvals_gen, volts_gen, volt_freq_conv)
tvals_gen: time values for the generated fluxpulse
volts_gen: voltages of the generated fluxpulse
volt_freq_conv: dictionary of fit params for frequency-voltage
conversion
"""
if qbn is None:
qbn = self.qb_names[0]
# Flux pulse parameters
# Needs to be changed when support for other pulses is added.
op_dict = {
'pulse_type': f'Instrument settings.{qbn}.flux_pulse_type',
'channel': f'Instrument settings.{qbn}.flux_pulse_channel',
'aux_channels_dict': f'Instrument settings.{qbn}.'
f'flux_pulse_aux_channels_dict',
'amplitude': f'Instrument settings.{qbn}.flux_pulse_amplitude',
'frequency': f'Instrument settings.{qbn}.flux_pulse_frequency',
'phase': f'Instrument settings.{qbn}.flux_pulse_phase',
'pulse_length': f'Instrument settings.{qbn}.'
f'flux_pulse_pulse_length',
'truncation_length': f'Instrument settings.{qbn}.'
f'flux_pulse_truncation_length',
'buffer_length_start': f'Instrument settings.{qbn}.'
f'flux_pulse_buffer_length_start',
'buffer_length_end': f'Instrument settings.{qbn}.'
f'flux_pulse_buffer_length_end',
'extra_buffer_aux_pulse': f'Instrument settings.{qbn}.'
f'flux_pulse_extra_buffer_aux_pulse',
'pulse_delay': f'Instrument settings.{qbn}.'
f'flux_pulse_pulse_delay',
'basis_rotation': f'Instrument settings.{qbn}.'
f'flux_pulse_basis_rotation',
'gaussian_filter_sigma': f'Instrument settings.{qbn}.'
f'flux_pulse_gaussian_filter_sigma',
}
params_dict = {
'volt_freq_conv': f'Instrument settings.{qbn}.'
f'fit_ge_freq_from_flux_pulse_amp',
'flux_channel': f'Instrument settings.{qbn}.'
f'flux_pulse_channel',
'instr_pulsar': f'Instrument settings.{qbn}.'
f'instr_pulsar',
**op_dict
}
dd = self.get_data_from_timestamp_list(params_dict)
if pulse_params is not None:
dd.update(pulse_params)
dd['element_name'] = 'element'
pulse = seg_mod.UnresolvedPulse(dd).pulse_obj
pulse.algorithm_time(0)
if tvals_gen is None:
clk = self.clock(channel=dd['channel'], pulsar=dd['instr_pulsar'])
tvals_gen = np.arange(0, pulse.length, 1 / clk)
volts_gen = pulse.chan_wf(dd['flux_channel'], tvals_gen)
volt_freq_conv = dd['volt_freq_conv']
return tvals_gen, volts_gen, volt_freq_conv
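# Illustrative use of the Cryoscope helpers above (timestamps, qubit name and
# constructor keywords are placeholders/assumptions; sketch only):
#     ca = CryoscopeAnalysis(qb_names=['qb1'], t_start='20200101_120000',
#                            t_stop='20200101_123000')
#     (tvals_gen, volts_gen, tvals_meas, freqs_meas,
#      freq_errs_meas, volt_freq_conv) = ca.get_generated_and_measured_pulse('qb1')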
class CZDynamicPhaseAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def process_data(self):
super().process_data()
# convert phases to radians
for qbn in self.qb_names:
sweep_dict = self.proc_data_dict['sweep_points_dict'][qbn]
sweep_dict['sweep_points'] *= np.pi/180
# get data with flux pulse and w/o flux pulse
self.data_with_fp = OrderedDict()
self.data_no_fp = OrderedDict()
for qbn in self.qb_names:
all_data = self.proc_data_dict['data_to_fit'][qbn]
if self.num_cal_points != 0:
all_data = all_data[:-self.num_cal_points]
self.data_with_fp[qbn] = all_data[0: len(all_data)//2]
self.data_no_fp[qbn] = all_data[len(all_data)//2:]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
sweep_points = np.unique(
self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'])
for i, data in enumerate([self.data_with_fp[qbn],
self.data_no_fp[qbn]]):
cos_mod = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=cos_mod,
t=sweep_points,
data=data, freq_guess=1/(2*np.pi))
guess_pars['frequency'].value = 1/(2*np.pi)
guess_pars['frequency'].vary = False
key = 'cos_fit_{}_{}'.format(qbn, 'wfp' if i == 0 else 'nofp')
self.fit_dicts[key] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['analysis_params_dict'][qbn] = OrderedDict()
self.proc_data_dict['analysis_params_dict'][qbn][
'dynamic_phase'] = {
'val': (self.fit_dicts[f'cos_fit_{qbn}_wfp'][
'fit_res'].best_values['phase'] -
self.fit_dicts[f'cos_fit_{qbn}_nofp'][
'fit_res'].best_values['phase']),
'stderr': np.sqrt(
self.fit_dicts[f'cos_fit_{qbn}_wfp'][
'fit_res'].params['phase'].stderr**2 +
self.fit_dicts[f'cos_fit_{qbn}_nofp'][
'fit_res'].params['phase'].stderr**2)
}
self.save_processed_data(key='analysis_params_dict')
def prepare_plots(self):
super().prepare_plots()
for qbn in self.qb_names:
for i, data in enumerate([self.data_with_fp[qbn],
self.data_no_fp[qbn]]):
                fit_key = f'cos_fit_{qbn}_wfp' if i == 0 else \
                    f'cos_fit_{qbn}_nofp'
                plot_name_suffix = 'fit_' + ('wfp' if i == 0 else 'nofp')
cal_pts_data = self.proc_data_dict['data_to_fit'][qbn][
-self.num_cal_points:]
base_plot_name = 'Dynamic_phase_' + qbn
self.prepare_projected_data_plot(
fig_name=base_plot_name,
data=np.concatenate((data,cal_pts_data)),
sweep_points=np.unique(
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points']),
data_label='with flux pulse' if i == 0 else 'no flux pulse',
plot_name_suffix=qbn + plot_name_suffix,
qb_name=qbn,
do_legend_cal_states=(i == 0))
if self.do_fitting:
fit_res = self.fit_dicts[fit_key]['fit_res']
self.plot_dicts[plot_name_suffix + '_' + qbn] = {
'fig_id': base_plot_name,
'plotfn': self.plot_fit,
'fit_res': fit_res ,
'setlabel': 'cosine fit',
'color': 'r',
'do_legend': i == 0}
textstr = 'Dynamic phase {}:\n\t{:.2f}'.format(
qbn,
self.proc_data_dict['analysis_params_dict'][qbn][
'dynamic_phase']['val']*180/np.pi) + \
r'$^{\circ}$' + \
'$\\pm${:.2f}'.format(
self.proc_data_dict['analysis_params_dict'][qbn][
'dynamic_phase']['stderr']*180/np.pi) + \
r'$^{\circ}$'
fpl = self.get_param_value('flux_pulse_length')
if fpl is not None:
textstr += '\n length: {:.2f} ns'.format(fpl*1e9)
fpa = self.get_param_value('flux_pulse_amp')
if fpa is not None:
textstr += '\n amp: {:.4f} V'.format(fpa)
self.plot_dicts['text_msg_' + qbn] = {
'fig_id': base_plot_name,
'ypos': -0.15,
'xpos': -0.05,
'horizontalalignment': 'left',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
for plot_name in list(self.plot_dicts)[::-1]:
if self.plot_dicts[plot_name].get('do_legend', False):
break
self.plot_dicts[plot_name].update(
{'legend_ncol': 2,
'legend_bbox_to_anchor': (1, -0.15),
'legend_pos': 'upper right'})
class MultiQutrit_Timetrace_Analysis(ba.BaseDataAnalysis):
"""
Analysis class for timetraces, in particular use to compute
Optimal SNR integration weights.
"""
def __init__(self, qb_names=None, auto=True, **kwargs):
"""
Initializes the timetrace analysis class.
Args:
qb_names (list): name of the qubits to analyze (can be a subset
of the measured qubits)
auto (bool): Start analysis automatically
**kwargs:
t_start: timestamp of the first timetrace
t_stop: timestamp of the last timetrace to analyze
options_dict (dict): relevant parameters:
acq_weights_basis (list, dict):
list of basis vectors used to compute optimal weight.
e.g. ["ge", 'gf'], the first basis vector will be the
"e" timetrace minus the "g" timetrace and the second basis
vector is f - g. The first letter in each basis state is the
"reference state", i.e. the one of which the timetrace
is substracted. Can also be passed as a dictionary where
keys are the qubit names and the values are lists of basis states
in case different bases should be used for different qubits.
orthonormalize (bool): Whether or not to orthonormalize the
weight basis
tmax (float): time boundary for the plot (not the weights)
in seconds.
scale_weights (bool): scales the weights near unity to avoid
loss of precision on FPGA if weights are too small
"""
if qb_names is not None:
self.params_dict = {}
for qbn in qb_names:
s = 'Instrument settings.' + qbn
for trans_name in ['ge', 'ef']:
self.params_dict[f'ro_mod_freq_' + qbn] = \
s + f'.ro_mod_freq'
self.numeric_params = list(self.params_dict)
self.qb_names = qb_names
super().__init__(**kwargs)
if auto:
self.run_analysis()
def extract_data(self):
super().extract_data()
if self.qb_names is None:
# get all qubits from cal_points of first timetrace
cp = CalibrationPoints.from_string(
self.get_param_value('cal_points', None, 0))
self.qb_names = deepcopy(cp.qb_names)
self.channel_map = self.get_param_value('channel_map', None,
metadata_index=0)
if self.channel_map is None:
# assume same channel map for all timetraces (pick 0th)
value_names = self.raw_data_dict[0]['value_names']
if np.ndim(value_names) > 0:
value_names = value_names
if 'w' in value_names[0]:
self.channel_map = a_tools.get_qb_channel_map_from_hdf(
self.qb_names, value_names=value_names,
file_path=self.raw_data_dict['folder'])
else:
self.channel_map = {}
for qbn in self.qb_names:
self.channel_map[qbn] = value_names
if len(self.channel_map) == 0:
raise ValueError('No qubit RO channels have been found.')
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
pdd['analysis_params_dict'] = dict()
ana_params = pdd['analysis_params_dict']
ana_params['timetraces'] = defaultdict(dict)
ana_params['optimal_weights'] = defaultdict(dict)
ana_params['optimal_weights_basis_labels'] = defaultdict(dict)
for qbn in self.qb_names:
# retrieve time traces
for i, rdd in enumerate(self.raw_data_dict):
ttrace_per_ro_ch = [rdd["measured_data"][ch]
for ch in self.channel_map[qbn]]
if len(ttrace_per_ro_ch) != 2:
raise NotImplementedError(
'This analysis does not support optimal weight '
f'measurement based on {len(ttrace_per_ro_ch)} ro channels.'
f' Try again with 2 RO channels.')
cp = CalibrationPoints.from_string(
self.get_param_value('cal_points', None, i))
# get state of qubit. There can be only one cal point per sequence
# when using uhf for time traces so it is the 0th state
qb_state = cp.states[0][cp.qb_names.index(qbn)]
# store all timetraces in same pdd for convenience
ana_params['timetraces'][qbn].update(
{qb_state: ttrace_per_ro_ch[0] + 1j *ttrace_per_ro_ch[1]})
timetraces = ana_params['timetraces'][qbn] # for convenience
basis_labels = self.get_param_value('acq_weights_basis', None, 0)
if basis_labels is None:
# guess basis labels from # states measured
basis_labels = ["ge", "ef"] \
if len(ana_params['timetraces'][qbn]) > 2 else ['ge']
if isinstance(basis_labels, dict):
# if different basis for qubits, then select the according one
basis_labels = basis_labels[qbn]
# check that states from the basis are included in mmnt
for bs in basis_labels:
for qb_s in bs:
assert qb_s in timetraces,\
f'State: {qb_s} on {qbn} was not provided in the given ' \
f'timestamps but was requested as part of the basis' \
f' {basis_labels}. Please choose another weight basis.'
basis = np.array([timetraces[b[1]] - timetraces[b[0]]
for b in basis_labels])
# orthonormalize if required
if self.get_param_value("orthonormalize", False):
# We need to consider the integration weights as a vector of
# real numbers to ensure the Gram-Schmidt transformation of the
# weights leads to a linear transformation of the integrated
# readout results (relates to how integration is done on UHF,
# see One Note: Surface 17/ATC75 M136 S17HW02 Cooldown 5/
# 210330 Notes on orthonormalizing readout weights
basis_real = np.hstack((basis.real, basis.imag), )
basis_real = math.gram_schmidt(basis_real.T).T
basis = basis_real[:,:basis_real.shape[1]//2] + \
1j*basis_real[:,basis_real.shape[1]//2:]
basis_labels = [bs + "_ortho" if bs != basis_labels[0] else bs
for bs in basis_labels]
# scale if required
if self.get_param_value('scale_weights', True):
k = np.amax([(np.max(np.abs(b.real)),
np.max(np.abs(b.imag))) for b in basis])
basis /= k
ana_params['optimal_weights'][qbn] = basis
ana_params['optimal_weights_basis_labels'][qbn] = basis_labels
self.save_processed_data()
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
ana_params = self.proc_data_dict['analysis_params_dict']
for qbn in self.qb_names:
mod_freq = float(
rdd[0].get(f'ro_mod_freq_{qbn}',
self.get_hdf_param_value(f"Instrument settings/{qbn}",
'ro_mod_freq')))
tbase = rdd[0]['hard_sweep_points']
basis_labels = pdd["analysis_params_dict"][
'optimal_weights_basis_labels'][qbn]
title = 'Optimal SNR weights ' + qbn + \
"".join(['\n' + rddi["timestamp"] for rddi in rdd]) \
+ f'\nWeight Basis: {basis_labels}'
plot_name = f"weights_{qbn}"
xlabel = "Time, $t$"
modulation = np.exp(2j * np.pi * mod_freq * tbase)
for ax_id, (state, ttrace) in \
enumerate(ana_params["timetraces"][qbn].items()):
for func, label in zip((np.real, np.imag), ('I', "Q")):
# plot timetraces for each state, I and Q channels
self.plot_dicts[f"{plot_name}_{state}_{label}"] = {
'fig_id': plot_name,
'ax_id': ax_id,
'plotfn': self.plot_line,
'xvals': tbase,
"marker": "",
'yvals': func(ttrace*modulation),
'ylabel': 'Voltage, $V$',
'yunit': 'V',
"sharex": True,
"setdesc": label + f"_{state}",
"setlabel": "",
"do_legend":True,
"legend_pos": "upper right",
'numplotsx': 1,
'numplotsy': len(rdd) + 1, # #states + 1 for weights
'plotsize': (10,
(len(rdd) + 1) * 3), # 3 inches per plot
'title': title if ax_id == 0 else ""}
ax_id = len(ana_params["timetraces"][qbn]) # id plots for weights
for i, weights in enumerate(ana_params['optimal_weights'][qbn]):
for func, label in zip((np.real, np.imag), ('I', "Q")):
self.plot_dicts[f"{plot_name}_weights_{label}_{i}"] = {
'fig_id': plot_name,
'ax_id': ax_id,
'plotfn': self.plot_line,
'xvals': tbase,
'xlabel': xlabel,
"setlabel": "",
"marker": "",
'xunit': 's',
'yvals': func(weights * modulation),
'ylabel': 'Voltage, $V$ (arb.u.)',
"sharex": True,
"xrange": (0, self.get_param_value('tmax', 1200e-9, 0)),
"setdesc": label + f"_{i+1}",
"do_legend": True,
"legend_pos": "upper right",
}
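# Minimal sketch of the optimal-weight construction done in
# MultiQutrit_Timetrace_Analysis.process_data() above (illustrative only; the
# helper name and the single 'ge' basis vector are assumptions):
def _example_ge_weights(timetrace_g, timetrace_e, scale=True):
    """Return a single complex 'ge' weight vector from two averaged traces."""
    weights = np.asarray(timetrace_e) - np.asarray(timetrace_g)
    if scale:
        # scale towards unity to avoid loss of precision on the acquisition
        # hardware if the weights are too small
        k = max(np.max(np.abs(weights.real)), np.max(np.abs(weights.imag)))
        weights = weights / k
    return weights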
class MultiQutrit_Singleshot_Readout_Analysis(MultiQubit_TimeDomain_Analysis):
"""
    Analysis class for parallel SSRO qutrit/qubit calibration. It is a child
    class of MultiQubit_TimeDomain_Analysis, as it uses the same functions to
- preprocess the data to remove active reset/preselection
- extract the channel map
- reorder the data per qubit
Note that in the future, it might be useful to transfer these functionalities
to the base analysis.
"""
def __init__(self,
options_dict: dict = None, auto=True, **kw):
'''
options dict options:
'nr_bins' : number of bins to use for the histograms
'post_select' :
'post_select_threshold' :
        'nr_samples' : number of different samples (e.g. ground and excited = 2)
'sample_0' : index of first sample (ground-state)
'sample_1' : index of second sample (first excited-state)
        'max_datapoints' : maximum number of datapoints for cumulative fit
'log_hist' : use log scale for the y-axis of the 1D histograms
'verbose' : see BaseDataAnalysis
'presentation_mode' : see BaseDataAnalysis
'classif_method': how to classify the data.
'ncc' : default. Nearest Cluster Center
'gmm': gaussian mixture model.
'threshold': finds optimal vertical and horizontal thresholds.
'classif_kw': kw to pass to the classifier
see BaseDataAnalysis for more.
'''
super().__init__(options_dict=options_dict, auto=False,
**kw)
self.params_dict = {
'measurementstring': 'measurementstring',
'measured_data': 'measured_data',
'value_names': 'value_names',
'value_units': 'value_units'}
self.numeric_params = []
self.DEFAULT_CLASSIF = "gmm"
self.classif_method = self.options_dict.get("classif_method",
self.DEFAULT_CLASSIF)
self.create_job(options_dict=options_dict, auto=auto, **kw)
if auto:
self.run_analysis()
def extract_data(self):
super().extract_data()
self.preselection = \
self.get_param_value("preparation_params",
{}).get("preparation_type", "wait") == "preselection"
default_states_info = defaultdict(dict)
default_states_info.update({"g": {"label": r"$|g\rangle$"},
"e": {"label": r"$|e\rangle$"},
"f": {"label": r"$|f\rangle$"}
})
self.states_info = \
self.get_param_value("states_info",
{qbn: deepcopy(default_states_info)
for qbn in self.qb_names})
def process_data(self):
"""
Create the histograms based on the raw data
"""
######################################################
# Separating data into shots for each level #
######################################################
super().process_data()
del self.proc_data_dict['data_to_fit'] # not used in this analysis
n_states = len(self.cp.states)
# prepare data in convenient format, i.e. arrays per qubit and per state
# e.g. {'qb1': {'g': np.array of shape (n_shots, n_ro_ch}, ...}, ...}
shots_per_qb = dict() # store shots per qb and per state
presel_shots_per_qb = dict() # store preselection ro
means = defaultdict(OrderedDict) # store mean per qb for each ro_ch
pdd = self.proc_data_dict # for convenience of notation
for qbn in self.qb_names:
# shape is (n_shots, n_ro_ch) i.e. one column for each ro_ch
shots_per_qb[qbn] = \
np.asarray(list(
pdd['meas_results_per_qb'][qbn].values())).T
# make 2D array in case only one channel (1D array)
if len(shots_per_qb[qbn].shape) == 1:
shots_per_qb[qbn] = np.expand_dims(shots_per_qb[qbn],
axis=-1)
for i, qb_state in enumerate(self.cp.get_states(qbn)[qbn]):
means[qbn][qb_state] = np.mean(shots_per_qb[qbn][i::n_states],
axis=0)
if self.preselection:
# preselection shots were removed so look at raw data
# and look at only the first out of every two readouts
presel_shots_per_qb[qbn] = \
np.asarray(list(
pdd['meas_results_per_qb_raw'][qbn].values())).T[::2]
# make 2D array in case only one channel (1D array)
if len(presel_shots_per_qb[qbn].shape) == 1:
presel_shots_per_qb[qbn] = \
                        np.expand_dims(presel_shots_per_qb[qbn], axis=-1)
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 18 14:03:35 2022
@author: USER
"""
#3rd party Modules
import numpy as np
#import sys
import warnings
from scipy.stats import qmc
import scipy.stats as sct
import mpi4py.MPI as MPI
class GsaOptions:
def __init__(self, run = True, run_sobol=True, run_morris=True, n_samp_sobol=100000, \
n_samp_morris=4, l_morris=3):
self.run = run
if self.run == False:
self.run_sobol = False
self.run_morris = False
else:
self.run_sobol=run_sobol #Whether to run Sobol (True or False)
self.run_morris=run_morris #Whether to run Morris (True or False)
self.n_samp_sobol = n_samp_sobol #Number of samples to be generated for GSA
self.n_samp_morris = n_samp_morris
self.l_morris=l_morris
pass
class GsaResults:
#
def __init__(self,sobol_base=np.nan, sobol_tot=np.nan, f_a=np.nan, f_b=np.nan, f_d=np.nan, f_ab=np.nan, \
samp_d=np.nan, morris_std=np.nan, morris_mean_abs=np.nan, morris_mean=np.nan):
self.sobol_base=sobol_base
self.sobol_tot=sobol_tot
self.f_a=f_a
self.f_b=f_b
self.f_d=f_d
self.f_ab=f_ab
self.samp_d=samp_d
self.morris_mean_abs=morris_mean_abs
self.morris_mean = morris_mean
self.morris_std=morris_std
pass
##--------------------------------------GSA-----------------------------------------------------
def run_gsa(model, gsa_options, logging = False):
"""Implements global sensitivity analysis using Morris or Sobol analysis.
Parameters
----------
model : Model
        Contains simulation information.
gsa_options : GSAOptions
Contains run settings
Returns
-------
GsaResults
Holds all run results
"""
    #GSA implements the following global sensitivity analysis methods on "model" object
    # 1) Gets sampling distribution (used only for internal calculations)
    # 2) Calculates Sobol Indices
    # 3) Performs Morris Screenings
# 4) Produces histogram plots for QOI values (not yet implemented)
# Required Inputs: Object of class "model" and object of class "options"
    # Outputs: Object of class GsaResults with morris and sobol elements
#Load mpi details to keep track of thread number
mpi_comm = MPI.COMM_WORLD
mpi_rank = mpi_comm.Get_rank()
mpi_size = mpi_comm.Get_size()
# Initialize gsa_results in all threads
gsa_results = GsaResults()
#Morris Screening
if gsa_options.run_morris:
#Set non-biased perturbation distance for even l
        #Source: <NAME>. 2011. Uncertainty Quantification. p.333
pert_distance = gsa_options.l_morris/ (2*(gsa_options.l_morris-1))
#Create parameter sample only on thread 0 since it need not be parallelized
# initialize memory location on all threads
morris_samp = np.zeros((gsa_options.n_samp_morris*(model.n_poi+1), model.n_poi),dtype = float)
if logging > 1:
print("initialized morris_samp of size: " + str(morris_samp.shape))
if mpi_rank == 0:
if logging:
print("Generating Morris Sample")
morris_samp = get_morris_poi_sample(model.sample_fcn, gsa_options.n_samp_morris,\
model.n_poi, pert_distance)
mpi_comm.Bcast([morris_samp,MPI.DOUBLE], root = 0)
morris_mean_abs, morris_mean, morris_std = calculate_morris(\
model.eval_fcn, morris_samp, \
pert_distance, logging = logging)
gsa_results.morris_mean_abs=morris_mean_abs
gsa_results.morris_mean = morris_mean
gsa_results.morris_std=morris_std
#Sobol Analysis Un parallelized for now
if gsa_options.run_sobol and mpi_rank == 0:
if logging:
print("Generating Sobol Sample")
#Make Distribution Samples and Calculate model results
[f_a, f_b, f_ab, f_d, samp_d] = get_sobol_sample(model, gsa_options)
#Calculate Sobol Indices
if logging:
print("Calculating Sobol Sample")
[sobol_base, sobol_tot]=calculate_sobol(f_a, f_b, f_ab, f_d)
gsa_results.f_d=f_d
gsa_results.f_a=f_a
gsa_results.f_b=f_b
gsa_results.f_ab=f_ab
gsa_results.samp_d=samp_d
gsa_results.sobol_base=sobol_base
gsa_results.sobol_tot=sobol_tot
#------------broadcast gsa results to other threads--------------------
return gsa_results
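# Illustrative usage sketch (assumption: a minimal stand-in object exposing
# only the attributes run_gsa() touches for a Sobol-only run, i.e. sample_fcn,
# eval_fcn, n_poi and n_qoi; the real Model class is defined elsewhere in this
# package):
def _example_run_gsa_sobol_only():
    class _ToyModel:
        n_poi = 3
        n_qoi = 1
        sample_fcn = staticmethod(
            lambda n: np.random.uniform(size=(n, 3)))
        eval_fcn = staticmethod(
            lambda x: np.sum(x * np.array([1.0, 2.0, 3.0]), axis=1,
                             keepdims=True))
    options = GsaOptions(run_sobol=True, run_morris=False, n_samp_sobol=2000)
    results = run_gsa(_ToyModel(), options)
    # for this additive model the first-order indices scale with the squared
    # coefficients 1, 4 and 9
    return results.sobol_base, results.sobol_tot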
###----------------------------------------------------------------------------------------------
###-------------------------------------Support Functions----------------------------------------
###----------------------------------------------------------------------------------------------
def get_sobol_sample(model,gsa_options):
"""Constructs and evaluates sobol samples using predefined sampling distributions.
Currently only function for uniform or saltelli normal
Parameters
----------
model : Model
        Contains simulation information.
gsa_options : GSAOptions
Contains run settings
Returns
-------
np.ndarray
n_samp_sobol x n_qoi array of evaluations of Sobol sample part a
np.ndarray
n_samp_sobol x n_qoi array of evaluations of Sobol sample part b
np.ndarray
        n_samp_sobol x n_poi x n_qoi array of evaluations of the mixed Sobol sample ab
np.ndarray
2*n_samp_sobol x n_qoi array of concatenated evaluations of part a and b
np.ndarray
2*n_samp_sobol x n_poi array of concatenated POI samples of part a and b
"""
n_samp_sobol = gsa_options.n_samp_sobol
# Make 2 POI sample matrices with n_samp_sobol samples each
# if np.all(model.dist_type!=np.array(["satelli normal", "satelli uniform"])):
# warnings.warn("Non-satelli sampling algorithm used for Sobol analysis."\
# + " Suggested distribution types are satelli normal "+\
# "and satelli uniform.")
sample_compact = model.sample_fcn(2*n_samp_sobol)
f_compact = model.eval_fcn(sample_compact)
    # Separate sample into a and b for the algorithm
samp_a = sample_compact[:n_samp_sobol]
samp_b = sample_compact[n_samp_sobol:]
f_a = f_compact[:n_samp_sobol]
f_b = f_compact[n_samp_sobol:] # n_samp_sobol x nQOI out matrix from B
# Stack the output matrices into a single matrix
f_d = np.concatenate((f_a.copy(), f_b.copy()), axis=0)
# Initialize combined QOI sample matrices
if model.n_qoi == 1:
f_ab = np.empty([n_samp_sobol, model.n_poi])
else:
f_ab = np.empty([n_samp_sobol, model.n_poi, model.n_qoi])
for i_param in range(0, model.n_poi):
# Define sampC to be A with the ith parameter in B
samp_ab = samp_a.copy()
samp_ab[:, i_param] = samp_b[:, i_param].copy()
if model.n_qoi == 1:
f_ab[:, i_param] = model.eval_fcn(samp_ab).squeeze()
else:
f_ab[:, i_param, :] = model.eval_fcn(samp_ab) # n_samp_sobol x nPOI x nQOI tensor
del samp_ab
return f_a, f_b, f_ab, f_d, sample_compact
def calculate_sobol(f_a, f_b, f_ab, f_d):
"""Calculates 1st order and total sobol indices using Saltelli approximation formula.
Parameters
----------
f_a : np.ndarray
n_samp_sobol x n_qoi array of evaluations of Sobol sample part a
f_b : np.ndarray
n_samp_sobol x n_qoi array of evaluations of Sobol sample part b
f_ab : np.ndarray
        n_samp_sobol x n_poi x n_qoi array of evaluations of the mixed Sobol sample ab
f_d : np.ndarray
2*n_samp_sobol x n_qoi array of concatenated evaluations of part a and b
Returns
-------
np.ndarray
n_qoi x n_poi array of 1st order Sobol indices
np.ndarray
n_qoi x n_poi array of total Sobol indices
"""
    #Calculates Sobol indices using the Saltelli approximation method
#Inputs: model object (with eval_fcn, sample, and nParams)
# sobolOptions object
    #Determine number of samples, QOIs, and POIs based on inputs
if f_ab.ndim==1:
n_qoi=1
n_poi=1
elif f_ab.ndim==2:
n_qoi=1
n_poi=f_ab.shape[1]
elif f_ab.ndim==3:
n_poi=f_ab.shape[1]
n_qoi=f_ab.shape[2]
else:
        raise(Exception('f_ab has greater than 3 dimensions, make sure f_ab is '
                        'the squeezed form of n_samp_sobol x nPOI x nQOI'))
#QOI variance
fDvar=np.var(f_d, axis=0)
sobol_base=np.empty((n_qoi, n_poi))
sobol_tot=np.empty((n_qoi, n_poi))
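    # Saltelli estimators used below:
    #   S_i  = E[f_B * (f_ABi - f_A)] / Var(f_D)        (first order)
    #   S_Ti = E[(f_A - f_ABi)**2] / (2 * Var(f_D))     (total effect)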
if n_qoi==1:
#Calculate 1st order parameter effects
sobol_base=np.mean(f_b*(f_ab-f_a), axis=0)/(fDvar)
#Caclulate 2nd order parameter effects
sobol_tot=np.mean((f_a-f_ab)**2, axis=0)/(2*fDvar)
else:
for iQOI in range(0,n_qoi):
#Calculate 1st order parameter effects
sobol_base[iQOI,:]=np.mean(f_b[:,[iQOI]]*(f_ab[:,:,iQOI]-f_a[:,[iQOI]]),axis=0)/fDvar[iQOI]
            #Calculate total-order parameter effects
sobol_tot[iQOI,:]= np.mean((f_a[:,[iQOI]]-f_ab[:,:,iQOI])**2,axis=0)/(2*fDvar[iQOI])
return sobol_base, sobol_tot
#==============================================================================
#----------------------------------Morris Sampling-----------------------------
#==============================================================================
##--------------------------------calculate_morris-----------------------------
def calculate_morris(eval_fcn, morris_samp, pert_distance, logging = False):
"""Calculates morris samples using information from Model and GsaOptions objects.
Parameters
----------
model : Model
Contaings simulation information.
gsa_options : GSAOptions
Contains run settings
Returns
-------
np.ndarray
n_qoi x n_poi array of morris sensitivity mean indices
np.ndarray
n_qoi x n_poi array of morris sensitivity variance indices
"""
#Evaluate Sample
#Load mpi details to keep track of thread number
mpi_comm = MPI.COMM_WORLD
mpi_rank = mpi_comm.Get_rank()
mpi_size = mpi_comm.Get_size()
if logging and mpi_rank == 0:
print("Evaulating Morris Sample")
if mpi_size == 1:
f_eval_compact = eval_fcn(morris_samp)
else:
f_eval_compact = parallel_eval(eval_fcn, morris_samp, logging = logging)
#Make sure all threads finish collecting f_eval_compact before continuing
mpi_comm.Barrier()
if logging > 1 and mpi_rank == 0:
print("f_eval_compact: " + str(f_eval_compact))
# Initialize Morris indices so that the memory is reserved when broadcasting
if f_eval_compact.ndim == 2:
morris_mean_abs = np.zeros((morris_samp.shape[1], f_eval_compact.shape[1]), dtype = float) # n_poi x n_qoi
morris_mean = np.zeros(morris_mean_abs.shape, dtype = float)
morris_std = np.zeros(morris_mean_abs.shape, dtype = float) # n_poi x n_qoi
else :
morris_mean_abs = np.zeros((morris_samp.shape[1]), dtype = float) # n_poi x n_qoi
morris_mean = np.zeros(morris_mean_abs.shape, dtype = float)
morris_std = np.zeros(morris_mean_abs.shape, dtype = float) # n_poi x n_qoi
# Perform morris calculation only on base thread
if mpi_rank == 0:
        #Compute # of pois, qois and samples to ensure consistency
if morris_samp.ndim == 1:
n_poi = 1
elif morris_samp.ndim == 2:
n_poi = morris_samp.shape[1]
else:
raise Exception("More than 2 dimensions in morris_samp")
#Convert to int so it can be used in indexing
n_samp = int(morris_samp.shape[0]/(n_poi+1))
if f_eval_compact.ndim == 2:
n_qoi = f_eval_compact.shape[1]
elif f_eval_compact.ndim ==1:
n_qoi = 1
else:
raise Exception("More than 2 dimensions in f_eval")
#Uncompact Samples
f_eval_seperated = morris_seperate(f_eval_compact, n_samp, n_poi, n_qoi)
morris_samp_seperated = morris_seperate(morris_samp, n_samp, n_poi, n_poi)
if logging > 1:
print("morris samp seperated: " + str(morris_samp_seperated))
#Get which sample perturbs which poi
poi_pert_location = get_poi_pert_location(morris_samp_seperated)
if logging > 1:
print("poi_pert_location: " + str(poi_pert_location))
#initialize data storage arrays with 1 dimension lower if n_qoi =1
if n_qoi > 1:
deriv_approx = np.empty((n_samp, n_poi, n_qoi)) # n_samp x n_poi x n_qoi
else:
            deriv_approx = np.empty((n_samp, n_poi))  # n_samp x n_poi
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 18 10:35:05 2018
@author: ANDD
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.io as sio
import matplotlib
#%matplotlib qt
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 22}
matplotlib.rc('font', **font)
Wnam = '15_5_5';top_blocks = [2050,2154, 2200,2245]
#Wnam = '155_6';top_blocks = [2056,2160, 2222,2267]
#Wnam = '155_3';top_blocks = [2076,2180, 2240,2285]
infile = Wnam + '.mat'
Wdict = sio.loadmat(infile)
keys = ['Md','Vp_b', 'Vs_b', 'Rhob_b', 'porosity', 'Vsh', 'Vp_o', 'Vs_o', 'Rhob_o']
cleaned_wdict = {key: Wdict[key].ravel() for key in keys}
df_in = pd.DataFrame.from_dict(cleaned_wdict)
df_in = df_in.dropna()
Md = df_in.Md
Wdict=df_in
def indices(a, func):
return [i for (i, val) in enumerate(a) if func(val)]
def logplot(Vsh,Md,Vp,Vs,Rho,MD,Vp_out,Vs_out,Rho_out):
plt.figure(figsize = (20,10))
plt.subplot(1,4,1)
plt.plot(Vsh,Md)
plt.ylim(np.max(MD)+2,np.min(MD)-2)
plt.xlabel('Vsh (fraction)')
plt.ylabel('Measured depth (m)')
plt.subplot(1,4,2)
plt.plot(Vp,Md)
plt.plot(Vp_out,MD, color = 'red',linewidth=4.0 )
plt.ylim(np.max(MD)+2,np.min(MD)-2)
plt.xlabel('Vp (m/s)')
plt.gca().set_yticks([])
plt.subplot(1,4,3)
plt.plot(Vs,Md)
plt.plot(Vs_out,MD, color = 'red' ,linewidth=4.0)
    plt.ylim(np.max(MD)+2,np.min(MD)-2)
'''
Abbasnejad et al. cars dataset
'''
import datetime
import os
import sys
from sklearn.model_selection._split import KFold
sys.path.append("./python")
sys.path.append("./python/analysis")
sys.path.append("./python/models")
sys.path.append("./python/test")
import matplotlib.pyplot as plt
import logging
logging.basicConfig(level=logging.DEBUG)
# include the paths for the other directories
import time
from scipy.optimize._minimize import minimize
from scipy.stats.stats import kendalltau
from collab_pref_learning_fitc import CollabPrefLearningFITC
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, log_loss
from collab_pref_learning_svi import CollabPrefLearningSVI
# from collab_pref_learning_svi_old import CollabPrefLearningSVI
from gp_pref_learning import GPPrefLearning
from per_user_pref_learning import GPPrefPerUser
verbose = False
def convert_discrete_to_continuous(features, cols_to_convert):
new_features = None
for col in np.arange(features.shape[1]):
if col not in cols_to_convert:
if new_features is None:
new_features = features[:, col:col+1]
else:
new_features = np.concatenate((new_features, features[:, col:col+1]), axis=1)
continue
maxval = np.max(features[:, col])
minval = np.min(features[:, col])
nvals = maxval - minval + 1
vals = np.arange(nvals) + minval
disc_vecs = None
for val in vals:
if disc_vecs is None:
disc_vecs = (features[:, col] == val)[:, None]
else:
disc_vecs = np.concatenate((disc_vecs, (features[:, col]==val)[:, None]), axis=1)
if new_features is None:
new_features = disc_vecs
else:
new_features = np.concatenate((new_features, disc_vecs), axis=1)
return new_features.astype(int)
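# Illustrative example (assumed inputs, not from the original script): with
#   features = np.array([[0.5, 2], [0.1, 3], [0.9, 2]]) and cols_to_convert = [1],
# column 0 is kept as-is and column 1 (values 2..3) is expanded into two
# one-hot indicator columns, giving rows [0.5, 1, 0], [0.1, 0, 1], [0.9, 1, 0]
# before the final astype(int), which truncates 0.5/0.1/0.9 to 0.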
def run_crowd_GPPL(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr,
u_test=None, i1_test=None, i2_test=None,
ninducing=None, use_common_mean=True, no_local_y=False):
Nfactors = ufeats.shape[0]
if Nfactors > max_facs:
Nfactors = max_facs # this is the maximum
if ninducing is None:
ninducing = np.max([ifeats.shape[0], ufeats.shape[0]])
model = CollabPrefLearningSVI(ifeats.shape[1], ufeats.shape[1], mu0=0, shape_s0=shape_s0, rate_s0=rate_s0,
shape_sy0=1e6, rate_sy0=1e6, ls=None,
nfactors=Nfactors, ninducing=ninducing, max_update_size=max_update_size,
forgetting_rate=forgetting_rate, verbose=verbose, use_lb=True,
use_common_mean_t=use_common_mean, delay=delay)
model.max_Kw_size = max_Kw_size
model.max_iter = 200
model.fit(u_tr, i1_tr, i2_tr, ifeats, prefs_tr, ufeats, use_median_ls=True)
if no_local_y:
model.use_local_obs_posterior_y = False
if u_test is None:
return model
# fpred = model.predict_f(ifeats[active_items], ufeats)
# rho_pred = model.predict(u_test, i1_test, i2_test, ifeats, ufeats)
fpred = model.predict_f()
rho_pred = model.predict(u_test, i1_test, i2_test)
# return predictions of preference scores for training users, new testing users, and pairwise testing labels
return fpred, rho_pred
def run_GPPL_pooled(_, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, __, i1_test, i2_test):
# we can use more inducing points because we don't have to compute GPs for the users and items separately,
# so increase the number to make the comparison fair.
pool_ninducing = int(ninducing * 2**(1/3.0))
model = GPPrefLearning(ifeats.shape[1], mu0=0, shape_s0=shape_s0, rate_s0=rate_s0, ls_initial=None, use_svi=True,
ninducing=pool_ninducing, max_update_size=max_update_size, forgetting_rate=forgetting_rate,
verbose=verbose)
model.max_iter_VB = 500
model.fit(i1_tr, i2_tr, ifeats, prefs_tr, use_median_ls=True)
fpred, _ = np.tile(model.predict_f(), (1, ufeats.shape[0]))
rho_pred, _ = model.predict(None, i1_test, i2_test)
# return predictions of preference scores for training users, new testing users, and pairwise testing labels
return fpred, rho_pred
def run_GPPL_joint(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test):
# we can use more inducing points because we don't have to compute GPs for the users and items separately,
# so increase the number to make the comparison fair.
joint_ninducing = int(ninducing * 2**(1/3.0))
model = GPPrefLearning(ifeats.shape[1], mu0=0, shape_s0=shape_s0, rate_s0=rate_s0, ls_initial=None, use_svi=True,
ninducing=joint_ninducing, max_update_size=max_update_size, forgetting_rate=forgetting_rate, verbose=verbose)
model.max_iter_VB = 500
# we need to use only the features for the subset of users in the training set!
# if user features are not very informative, then the inducing points may be fairly useless.
# this might explain why performance is low for the joint model and crowd-GPPL.
# However, BMF and GPPL\u are also still too low?
joint_ifeats = np.tile(ifeats, (ufeats.shape[0], 1))
joint_ufeats = np.tile(ufeats, (1, ifeats.shape[0])).reshape((ufeats.shape[0]*ifeats.shape[0], ufeats.shape[1]))
joint_feats = np.concatenate((joint_ifeats, joint_ufeats), axis=1)
i1_tr = i1_tr + (ifeats.shape[0] * u_tr)
i2_tr = i2_tr + (ifeats.shape[0] * u_tr)
model.fit(i1_tr, i2_tr, joint_feats, prefs_tr, use_median_ls=True)
# we need to split the prediction into batches because predict() needs pairwise covariance terms and would otherwise end up computing the full covariance matrix
batchsize = 100
nbatches = int(np.ceil(np.unique(u_test).shape[0] / float(batchsize)))
rho_pred = []
for batch in range(nbatches):
# all of the pairs and features that relate to a batch of users
idxs = (u_test >= (batch) * batchsize) & (u_test < (batch+1) * batchsize)
u_test_b = u_test[idxs]
i1_test_b = i1_test[idxs]
i2_test_b = i2_test[idxs]
joint_feats_idxs_b, pairs_b = np.unique([i1_test_b + (ifeats.shape[0] * u_test_b),
i2_test_b + (ifeats.shape[0] * u_test_b)],
return_inverse=True)
pairs_b = pairs_b.reshape(2, i1_test_b.shape[0])
rho_pred_b, _ = model.predict(joint_feats[joint_feats_idxs_b], pairs_b[0], pairs_b[1])
rho_pred = np.append(rho_pred, rho_pred_b)
joint_ifeats = np.tile(ifeats, (ufeats.shape[0], 1))
joint_ufeats = np.tile(ufeats, (1, ifeats.shape[0])).reshape((ufeats.shape[0]*ifeats.shape[0],
ufeats.shape[1]))
joint_feats = np.concatenate((joint_ifeats, joint_ufeats), axis=1)
fpred, _ = model.predict_f(joint_feats)
fpred = fpred.reshape(ufeats.shape[0], ifeats.shape[0]).T
# return predictions of preference scores for training users, new testing users, and pairwise testing labels
return fpred, rho_pred
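# Note on the joint indexing above (added for clarity, not in the original):
# with n_items = ifeats.shape[0], row u * n_items + i of joint_feats holds the
# concatenation of item i's features and user u's features, which is why the
# training pair indices are offset as i1_tr + (ifeats.shape[0] * u_tr). For
# example, with 3 items, user u = 2 and item i = 1 map to joint row 7.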
def run_GPPL_per_user(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test):
model = GPPrefPerUser(ufeats.shape[0], max_update_size, shape_s0, rate_s0, ifeats.shape[1], ninducing)
model.fit(u_tr, i1_tr, i2_tr, ifeats, prefs_tr, None, False, use_median_ls=True)
fpred = model.predict_f(None, personids=None)
rho_pred = model.predict(u_test, i1_test, i2_test, None, None)
# return predictions of preference scores for training users, new testing users, and pairwise testing labels
return fpred, rho_pred
def run_crowd_GPPL_without_u(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test):
Nfactors = ufeats.shape[0]
if Nfactors > max_facs:
Nfactors = max_facs # this is the maximum
model = CollabPrefLearningSVI(ifeats.shape[1], 0, mu0=0, shape_s0=shape_s0, rate_s0=rate_s0,
shape_sy0=1e6, rate_sy0=1e6, ls=None,
nfactors=Nfactors, ninducing=ninducing, max_update_size=max_update_size,
forgetting_rate=forgetting_rate, verbose=verbose, use_lb=True,
use_common_mean_t=True, delay=delay)
model.max_Kw_size = max_Kw_size
model.max_iter = 500
model.fit(u_tr, i1_tr, i2_tr, ifeats, prefs_tr, None, use_median_ls=True)
fpred = model.predict_f(None, None)
rho_pred = model.predict(u_test, i1_test, i2_test, None, None)
# return predictions of preference scores for training users, new testing users, and pairwise testing labels
return fpred, rho_pred
def run_crowd_BMF(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test):
Nfactors = ufeats.shape[0]
if Nfactors > max_facs:
Nfactors = max_facs # this is the maximum
model = CollabPrefLearningSVI(1, 1, mu0=0, shape_s0=shape_s0, rate_s0=rate_s0,
shape_sy0=1e6, rate_sy0=1e6, ls=None,
nfactors=Nfactors, ninducing=ninducing, max_update_size=max_update_size,
forgetting_rate=forgetting_rate, verbose=verbose, use_lb=True, kernel_func='diagonal',
delay=delay)
model.max_Kw_size = max_Kw_size
model.max_iter = 500
model.fit(u_tr, i1_tr, i2_tr, ifeats, prefs_tr, None, use_median_ls=True)
fpred = model.predict_f(None, None)
rho_pred = model.predict(u_test, i1_test, i2_test, ifeats, None)
# return predictions of preference scores for training users, new testing users, and pairwise testing labels
return fpred, rho_pred
def train_test(method_name, u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test):
if method_name == 'crowd-GPPL':
return run_crowd_GPPL(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test, ninducing=ninducing)
elif method_name == 'crowd-GPPL-ny':
return run_crowd_GPPL(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test, ninducing=ninducing, no_local_y=True)
elif method_name == 'crowd-GPPL-noConsensus':
return run_crowd_GPPL(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test, ninducing=ninducing, use_common_mean=False)
elif method_name == 'crowd-GPPL-noInduc':
return run_crowd_GPPL(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test, ninducing=None)
elif method_name == 'GPPL-pooled':
return run_GPPL_pooled(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test)
elif method_name == 'GPPL-joint':
return run_GPPL_joint(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test)
elif method_name == 'GPPL-per-user':
return run_GPPL_per_user(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test)
elif method_name == 'crowd-GPPL\\u':
return run_crowd_GPPL_without_u(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test)
elif method_name == 'crowd-BMF':
return run_crowd_BMF(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test)
elif method_name == 'crowd-GPPL-FITC\\u-noConsensus': # No common mean, i.e. like Houlsby but SVI
return run_collab_FITC_without_u(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test)
elif method_name == 'crowd-GPPL-FITC\\u':
return run_collab_FITC_without_u(u_tr, i1_tr, i2_tr, ifeats, ufeats, prefs_tr, u_test, i1_test, i2_test, use_common_mean=True)
def run_expt(methods, expt_name):
# predictions from all reps and methods
fpred_all = []
rho_pred_all = []
# metrics from all reps and methods
acc_all = []
logloss_all = []
times_all = []
# for repeatability
np.random.seed(30)
results_path = './results/' + expt_name
if not os.path.exists(results_path):
os.mkdir(results_path)
kfolder = KFold(n_splits=no_folds)
# we switch the training and test sets because we actually want to train on a small subset
for foldidx, (tr_pair_idxs, test_pair_idxs) in enumerate(kfolder.split(prefs)):
if foldidx >= max_no_folds:
break
# Get training and test data
u_tr = userids[tr_pair_idxs]
i1_tr = items1[tr_pair_idxs]
i2_tr = items2[tr_pair_idxs]
prefs_tr = prefs[tr_pair_idxs]
u_test = userids[test_pair_idxs]
i1_test = items1[test_pair_idxs]
i2_test = items2[test_pair_idxs]
prefs_test = prefs[test_pair_idxs]
print(u_tr)
print(i1_tr)
print(i2_tr)
print(prefs_tr)
fpred_r = []
rho_pred_r = []
acc_r = []
logloss_r = []
times_r = []
for m in methods:
# Train and Predict
logging.info("Starting test with method %s..." % (m))
starttime = time.time()
fpred, rho_pred = train_test(m, u_tr, i1_tr, i2_tr, item_features,
user_features, prefs_tr, u_test, i1_test, i2_test)
endtime = time.time()
times_r.append(endtime - starttime)
# Save predictions
fpred_r.append(fpred.flatten())
rho_pred_r.append(rho_pred.flatten())
# Compute metrics
acc_m = accuracy_score(prefs_test, np.round(rho_pred))
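# Hedged sketch (the original file is truncated here): the metric collection
# would plausibly continue along the lines of
#   logloss_m = log_loss(prefs_test, rho_pred)
#   acc_r.append(acc_m)
#   logloss_r.append(logloss_m)
# before appending the per-fold lists to acc_all / logloss_all / times_all.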
from __future__ import division
import warnings
from pycircstat import CI
from pycircstat.iterators import index_bootstrap
import numpy as np
from scipy import stats
import pandas as pd
class BaseRegressor(object):
"""
Basic regressor object. Mother class to all other regressors.
Regressors support indexing which is passed to the coefficients.
Regressors also support calling. In this case the prediction function is called.
"""
def __init__(self):
self._coef = None
def istrained(self):
"""
Returns whether the regressor is trained or not.
:return: True if trained
"""
return self._coef is not None
def train(self, *args, **kwargs):
raise NotImplementedError(u"{0:s}.train not implemented".format(self.__class__.__name__))
def test(self, *args, **kwargs):
raise NotImplementedError(u"{0:s}.test not implemented".format(self.__class__.__name__))
def loss(self, x, y, lossfunc, ci=None, bootstrap_iter=1000):
"""
Computes loss function between the predictions f(x) and the true y.
:param x: inputs in radians. If multidimensional, each row must
be a specimen and each column a feature.
:param y: desired outputs in radians. If multidimensional, each
row must be a specimen and each column a feature.
:param lossfunc: loss function, must take an array of input and outputs and compute the loss.
:param ci: confidence interval in [0,1]. If not None, bootstrapping is performed.
:param bootstrap_iter: number of bootstrap iterations used if ci is not None.
:return: loss as computed by the loss function.
"""
if ci is not None:
yhat = self.predict(x)
l = [lossfunc(y[idx], yhat[idx]) for idx in index_bootstrap(x.shape[0], bootstrap_iter)]
mu = np.mean(l)
q = 1 - ci
return mu, CI(np.percentile(l, q / 2. * 100), np.percentile(l, (1 - q / 2.) * 100))
return lossfunc(y, self.predict(x))
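# Illustrative usage (not part of the original module): with a trained
# regressor `reg`, a mean-squared-error loss with a 95% bootstrap CI could be
# obtained as
#   mse = lambda y, yhat: np.mean((y - yhat) ** 2)
#   mu, ci_bounds = reg.loss(inputs, targets, mse, ci=0.95)
# where `inputs` is whatever reg.predict expects and `targets` are the true values.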
def predict(self, *args, **kwargs):
raise NotImplementedError(u"{0:s}.predict not implemented".format(self.__class__.__name__))
def __getitem__(self, item):
return self._coef.__getitem__(item)
def __setitem__(self, key, value):
return self._coef.__setitem__(key, value)
def __call__(self, *args, **kwargs):
assert self.istrained(), "Regressor must be trained first."
return self.predict(*args, **kwargs)
class CL1stOrderRegression(BaseRegressor):
"""
Implements a circular linear regression model of the form
.. math::
x = m + a \\cos(\\alpha - \\alpha_0)
The actual model is equivalently implemented as
.. math::
x = c_1 \\cos(\\alpha) + c_2 \\sin(\\alpha) + m
References: [Jammalamadaka2001]_
"""
def __init__(self):
super(CL1stOrderRegression, self).__init__()
def train(self, alpha, x):
"""
Estimates the regression coefficients. Only works for 1D data.
:param alpha: independent variable, angles in radians
:param x: dependent variable
"""
assert alpha.shape == x.shape, "x and alpha need to have the same shape"
assert len(alpha.shape) == 1, "regression only implemented for 1D data"
assert len(x.shape) == 1, "regression only implemented for 1D data"
X = np.c_[np.cos(alpha), np.sin(alpha)]
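# Hedged sketch (the original method is truncated here): one plausible
# completion is an ordinary least-squares solve for c1, c2 and m, extending
# the design matrix with a column of ones for the intercept:
#   X = np.c_[np.cos(alpha), np.sin(alpha), np.ones_like(alpha)]
#   self._coef = np.linalg.lstsq(X, x, rcond=None)[0]
# with self._coef then holding (c1, c2, m).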
"""Defines a polyhedron."""
import numpy as np
import rowan
from scipy.sparse.csgraph import connected_components
from .base_classes import Shape3D
from .convex_polygon import ConvexPolygon, _is_convex
from .polygon import Polygon, _is_simple
from .sphere import Sphere
from .utils import _generate_ax, _set_3d_axes_equal, translate_inertia_tensor
try:
import miniball
MINIBALL = True
except ImportError:
MINIBALL = False
def _face_to_edges(face, reverse=False):
"""Convert a face into a sequence of edges (tuples).
Args:
face (array-like):
A face composed of vertex indices.
reverse (bool):
Whether to return the edges in reverse.
Returns:
list[tuple[int, int]]:
A list of edges where each is a tuple of a pair of vertices.
"""
shift = 1 if reverse else -1
return list(zip(*np.stack((face, np.roll(face, shift)))))
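# Worked example (added for illustration): _face_to_edges([0, 1, 2]) stacks the
# face with np.roll(face, -1) = [1, 2, 0] and zips the pairs, returning
# [(0, 1), (1, 2), (2, 0)]; with reverse=True the roll is +1 and the edges come
# out in the opposite orientation, [(0, 2), (1, 0), (2, 1)].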
class Polyhedron(Shape3D):
"""A three-dimensional polytope.
A polyhedron is defined by a set of vertices and a set of faces
composed of the vertices. On construction, the faces are reordered
counterclockwise with respect to an outward normal. The polyhedron
provides various standard geometric calculations, such as volume and
surface area. Most features of the polyhedron can be accessed via
properties, including the plane equations defining the faces and the
neighbors of each face.
.. note::
For the purposes of calculations like moments of inertia, the
polyhedron is assumed to be of constant, unit density.
Args:
vertices (:math:`(N, 3)` :class:`numpy.ndarray`):
The vertices of the polyhedron.
faces (list(list)):
The faces of the polyhedron.
faces_are_convex (bool, optional):
Whether or not the faces of the polyhedron are all convex.
This is used to determine whether certain operations like
coplanar face merging are allowed (Default value: False).
Example:
>>> cube = coxeter.shapes.ConvexPolyhedron(
... [[1, 1, 1], [1, -1, 1], [1, 1, -1], [1, -1, -1],
... [-1, 1, 1], [-1, -1, 1], [-1, 1, -1], [-1, -1, -1]])
>>> cube = coxeter.shapes.Polyhedron(
... vertices=cube.vertices, faces=cube.faces)
>>> bounding_sphere = cube.bounding_sphere
>>> import numpy as np
>>> assert np.isclose(bounding_sphere.radius, np.sqrt(3))
>>> cube.center
array([0., 0., 0.])
>>> cube.circumsphere
<coxeter.shapes.sphere.Sphere object at 0x...>
>>> cube.faces
[array([4, 5, 1, 0], dtype=int32), array([0, 2, 6, 4], dtype=int32),
array([6, 7, 5, 4], dtype=int32), array([0, 1, 3, 2], dtype=int32),
array([5, 7, 3, 1], dtype=int32), array([2, 3, 7, 6], dtype=int32)]
>>> cube.gsd_shape_spec
{'type': 'Mesh', 'vertices': [[1.0, 1.0, 1.0], [1.0, -1.0, 1.0],
[1.0, 1.0, -1.0], [1.0, -1.0, -1.0], [-1.0, 1.0, 1.0],
[-1.0, -1.0, 1.0], [-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]], 'faces':
[array([4, 5, 1, 0], dtype=int32), array([0, 2, 6, 4], dtype=int32),
array([6, 7, 5, 4], dtype=int32), array([0, 1, 3, 2], dtype=int32),
array([5, 7, 3, 1], dtype=int32), array([2, 3, 7, 6], dtype=int32)]}
>>> assert np.allclose(
... cube.inertia_tensor,
... np.diag([16. / 3., 16. / 3., 16. / 3.]))
>>> assert np.isclose(cube.iq, np.pi / 6.)
>>> cube.neighbors
[array([1, 2, 3, 4]), array([0, 2, 3, 5]), array([0, 1, 4, 5]),
array([0, 1, 4, 5]), array([0, 2, 3, 5]), array([1, 2, 3, 4])]
>>> cube.normals
array([[ 0., 0., 1.],
[ 0., 1., -0.],
[-1., 0., 0.],
[ 1., -0., 0.],
[ 0., -1., 0.],
[ 0., 0., -1.]])
>>> cube.num_faces
6
>>> cube.num_vertices
8
>>> assert np.isclose(cube.surface_area, 24.0)
>>> cube.vertices
array([[ 1., 1., 1.],
[ 1., -1., 1.],
[ 1., 1., -1.],
[ 1., -1., -1.],
[-1., 1., 1.],
[-1., -1., 1.],
[-1., 1., -1.],
[-1., -1., -1.]])
>>> assert np.isclose(cube.volume, 8.0)
"""
def __init__(self, vertices, faces, faces_are_convex=None):
self._vertices = np.array(vertices, dtype=np.float64)
self._faces = [face for face in faces]
if faces_are_convex is None:
faces_are_convex = all(len(face) == 3 for face in faces)
self._faces_are_convex = faces_are_convex
self._find_equations()
self._find_neighbors()
def _find_equations(self):
"""Find the plane equations of the polyhedron faces."""
self._equations = np.empty((len(self.faces), 4))
for i, face in enumerate(self.faces):
# The direction of the normal is selected such that vertices that
# are already ordered counterclockwise will point outward.
normal = np.cross(
self.vertices[face[2]] - self.vertices[face[1]],
self.vertices[face[0]] - self.vertices[face[1]],
)
normal /= np.linalg.norm(normal)
self._equations[i, :3] = normal
# Sign conventions chosen to match scipy.spatial.ConvexHull
# We use ax + by + cz + d = 0 (not ax + by + cz = d)
self._equations[i, 3] = -normal.dot(self.vertices[face[0]])
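# Added note (not in the original source): each row of self._equations stores
# (a, b, c, d) with ax + by + cz + d = 0, so for face i
# np.dot(self._equations[i, :3], self.vertices[face[0]]) + self._equations[i, 3]
# is exactly 0, and the same holds (up to numerical error) for the face's other
# vertices when the face is planar. With outward normals, points strictly inside
# a convex polyhedron give negative values for every face (assumes convexity).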
def _find_neighbors(self):
"""Find neighbors of faces."""
self._neighbors = [[] for _ in range(self.num_faces)]
for i, j, _ in self._get_face_intersections():
self._neighbors[i].append(j)
self._neighbors[j].append(i)
self._neighbors = [ | np.array(neigh) | numpy.array |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
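# Illustrative usage (added; relies only on the dictionary populated below):
#   sg = space_groups['P 21 21 21']
#   hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
# hkls then has one row per symmetry operation (4 for P 21 21 21) and phases
# holds the complex factors relating the corresponding structure factors.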
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
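# Space group 70: F d d d :2 (origin choice 2)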
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
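# Space group 71: I m m m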
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
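# Space group 72: I b a m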
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
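# Space group 73: I b c a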
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
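# Space group 74: I m m a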
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
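# Space group 75: P 4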
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
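# Space group 76: P 41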
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
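# Space group 77: P 42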
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
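# Space group 78: P 43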
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
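# Space group 79: I 4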
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
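# Space group 80: I 41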
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
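# Space group 81: P -4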
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
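# Space group 82: I -4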
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
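# Space group 83: P 4/m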
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
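# Space group 84: P 42/m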
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
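# Space group 85: P 4/n :2 (origin choice 2)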
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
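# Space group 86: P 42/n :2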
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
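# Space group 87: I 4/m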
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
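# Space group 88: I 41/a :2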
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
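# Space group 89: P 4 2 2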
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
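# Space group 90: P 4 21 2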
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
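# Space group 91: P 41 2 2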
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
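# Space group 92: P 41 21 2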
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
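# Space group 93: P 42 2 2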
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
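# Space group 94: P 42 21 2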
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
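# Space group 95: P 43 2 2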
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
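# Space group 96: P 43 21 2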
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
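# Space group 97: I 4 2 2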
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
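# Space group 98: I 41 2 2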
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
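# Space group 99: P 4 m m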
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
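# Note: for body-centred (I) groups such as 'I 4 m m', the second half of the
# operation list repeats the first half with the centring vector (1/2, 1/2, 1/2)
# added. Translations are stored as generated, without reduction modulo 1,
# which is why combinations like trans_num = [1,1,1] with trans_den = [2,2,1]
# (a component equal to 1) appear below for some centred operations.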
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
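# The ':2' suffix in symbols such as 'P 4/n b m :2' marks the origin-choice-2
# setting of the group; translations for these settings are likewise stored as
# generated, so negative components such as trans_num = [-1,0,0] occur.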
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
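# Space group 128, 'P 4/m n c'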
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
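# Space group 129, 'P 4/n m m :2'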
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
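# Space group 130, 'P 4/n c c :2'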
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
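# Space group 131, 'P 42/m m c'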
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
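# Space group 132, 'P 42/m c m'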
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
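# Space group 133, 'P 42/n b c :2'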
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
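# Space group 134, 'P 42/n n m :2'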
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
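# Space group 135, 'P 42/m b c'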
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
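# Space group 136, 'P 42/m n m'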
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
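# Space group 137, 'P 42/n m c :2'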
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
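# Space group 138, 'P 42/n c m :2'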
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
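# Space group 139, 'I 4/m m m' (body-centred: the second half of the list
# repeats the first 16 operations shifted by the (1/2, 1/2, 1/2) centring
# translation)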
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
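# Space group 140, 'I 4/m c m'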
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
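# Space group 141, 'I 41/a m d :2'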
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
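# Space group 142, 'I 41/a c d :2'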
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
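# Space group 143, 'P 3'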
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
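# Space group 144, 'P 31'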
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
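# Space group 145, 'P 32'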
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
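# Space group 146, 'R 3 :H' (rhombohedral group in the hexagonal setting: the
# three operations are repeated with the (1/3, 2/3, 2/3) and (2/3, 1/3, 1/3)
# centring translations)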
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
import os
import time
import msgpack
import numpy as np
from loguru import logger
from msgpack import Unpacker
from numpy import typing as npt
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.internet.interfaces import IAddress
from twisted.internet.protocol import Factory, Protocol, connectionDone
from twisted.python import failure
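# Sieve of Eratosthenes helper: builds a boolean array marking primality up to target_n.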
def _sieve(target_n: int) -> npt.NDArray:
assert 1 < target_n
candidates = np.ones(shape=(target_n + 1,), dtype=bool)
i = 2
while i <= np.sqrt(target_n):
if candidates[i]:
            j = i * i
            while j <= target_n:
                candidates[j] = False  # mark multiples of i as composite
                j += i
        i += 1
    candidates[0] = candidates[1] = False
    # Assumed completion of the truncated sieve: return the boolean primality mask for 0..target_n.
    return candidates
"""
gui/average3
~~~~~~~~~~~~~~~~~~~~
Graphical user interface for three-dimensional averaging of particles
:author: <NAME>, 2017-2018
:copyright: Copyright (c) 2017-2018 Jungmann Lab, MPI of Biochemistry
"""
import os.path
import sys
import traceback
import colorsys
import matplotlib.pyplot as plt
import numba
import numpy as np
import scipy
from scipy import signal
from PyQt5 import QtCore, QtGui, QtWidgets
from .. import io, lib, render
from numpy.lib.recfunctions import stack_arrays
from cmath import rect, phase
from tqdm import tqdm
import scipy.ndimage.filters
DEFAULT_OVERSAMPLING = 1.0
INITIAL_REL_MAXIMUM = 2.0
ZOOM = 10 / 7
N_GROUP_COLORS = 8
@numba.jit(nopython=True, nogil=True)
def render_hist(x, y, oversampling, t_min, t_max):
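    # Render a square 2D histogram of localizations covering [t_min, t_max) on both
    # axes at the given oversampling; returns the in-view count and the image.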
n_pixel = int(np.ceil(oversampling * (t_max - t_min)))
in_view = (x > t_min) & (y > t_min) & (x < t_max) & (y < t_max)
x = x[in_view]
y = y[in_view]
x = oversampling * (x - t_min)
y = oversampling * (y - t_min)
image = np.zeros((n_pixel, n_pixel), dtype=np.float32)
render._fill(image, x, y)
return len(x), image
@numba.jit(nopython=True, nogil=True)
def render_histxyz(a, b, oversampling, a_min, a_max, b_min, b_max):
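    # Rectangular variant of render_hist with independent ranges for the two axes.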
n_pixel_a = int(np.ceil(oversampling * (a_max - a_min)))
n_pixel_b = int(np.ceil(oversampling * (b_max - b_min)))
in_view = (a > a_min) & (b > b_min) & (a < a_max) & (b < b_max)
a = a[in_view]
b = b[in_view]
a = oversampling * (a - a_min)
b = oversampling * (b - b_min)
image = np.zeros((n_pixel_b, n_pixel_a), dtype=np.float32)
render._fill(image, a, b)
return len(a), image
def rotate_axis(axis, vx, vy, vz, angle, pixelsize):
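    # Rotate (vx, vy, vz) about the chosen axis; z is stored in nm, so it is divided
    # or multiplied by pixelsize whenever it mixes with the pixel-based x/y coordinates.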
if axis == "z":
vx_rot = np.cos(angle) * vx - np.sin(angle) * vy
vy_rot = np.sin(angle) * vx + np.cos(angle) * vy
vz_rot = vz
elif axis == "y":
vx_rot = np.cos(angle) * vx + np.sin(angle) * np.divide(vz, pixelsize)
vy_rot = vy
vz_rot = -np.sin(angle) * vx * pixelsize + np.cos(angle) * vz
elif axis == "x":
vx_rot = vx
vy_rot = np.cos(angle) * vy - np.sin(angle) * np.divide(vz, pixelsize)
vz_rot = np.sin(angle) * vy * pixelsize + np.cos(angle) * vz
return vx_rot, vy_rot, vz_rot
def compute_xcorr(CF_image_avg, image):
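    # FFT-based cross-correlation; CF_image_avg is the precomputed (typically
    # conjugated) Fourier transform of the reference average image.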
F_image = np.fft.fft2(image)
xcorr = np.fft.fftshift(np.real(np.fft.ifft2((F_image * CF_image_avg))))
return xcorr
class ParametersDialog(QtWidgets.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle("Parameters")
self.setModal(False)
grid = QtWidgets.QGridLayout(self)
grid.addWidget(QtWidgets.QLabel("Oversampling:"), 0, 0)
self.oversampling = QtWidgets.QDoubleSpinBox()
self.oversampling.setRange(1, 200)
self.oversampling.setValue(DEFAULT_OVERSAMPLING)
self.oversampling.setDecimals(1)
self.oversampling.setKeyboardTracking(False)
self.oversampling.valueChanged.connect(self.window.updateLayout)
grid.addWidget(self.oversampling, 0, 1)
self.iterations = QtWidgets.QSpinBox()
self.iterations.setRange(1, 1)
self.iterations.setValue(1)
class View(QtWidgets.QLabel):
def __init__(self, window):
super().__init__()
self.window = window
self.setMinimumSize(1, 1)
self.setAlignment(QtCore.Qt.AlignCenter)
self.setAcceptDrops(True)
self._pixmap = None
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == ".hdf5":
self.open(path)
def resizeEvent(self, event):
if self._pixmap is not None:
self.set_pixmap(self._pixmap)
def set_image(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap("magma")(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype("uint8")
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order="C")
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
self._pixmap = QtGui.QPixmap.fromImage(qimage)
self.set_pixmap(self._pixmap)
def set_pixmap(self, pixmap):
self.setPixmap(
pixmap.scaled(
self.width(),
self.height(),
QtCore.Qt.KeepAspectRatio,
QtCore.Qt.FastTransformation,
)
)
def update_image(self, *args):
oversampling = self.window.parameters_dialog.oversampling.value()
t_min = -self.r
t_max = self.r
N_avg, image_avg = render.render_hist(
self.locs, oversampling, t_min, t_min, t_max, t_max
)
self.set_image(image_avg)
class DatasetDialog(QtWidgets.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle("Datasets")
self.setModal(False)
self.layout = QtWidgets.QVBoxLayout()
self.checks = []
self.setLayout(self.layout)
def add_entry(self, path):
c = QtWidgets.QCheckBox(path)
self.layout.addWidget(c)
self.checks.append(c)
self.checks[-1].setChecked(True)
class Window(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("Picasso: Average3")
self.resize(1024, 512)
this_directory = os.path.dirname(os.path.realpath(__file__))
icon_path = os.path.join(this_directory, "icons", "average.ico")
icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon)
self.setAcceptDrops(True)
self.parameters_dialog = ParametersDialog(self)
self.dataset_dialog = DatasetDialog(self)
menu_bar = self.menuBar()
file_menu = menu_bar.addMenu("File")
open_action = file_menu.addAction("Open")
open_action.setShortcut(QtGui.QKeySequence.Open)
open_action.triggered.connect(self.open)
file_menu.addAction(open_action)
save_action = file_menu.addAction("Save")
save_action.setShortcut(QtGui.QKeySequence.Save)
save_action.triggered.connect(self.save)
file_menu.addAction(save_action)
process_menu = menu_bar.addMenu("Process")
parameters_action = process_menu.addAction("Parameters")
parameters_action.setShortcut("Ctrl+P")
parameters_action.triggered.connect(self.parameters_dialog.show)
dataset_action = process_menu.addAction("Datasets")
dataset_action.triggered.connect(self.dataset_dialog.show)
self.status_bar = self.statusBar()
self._pixmap = None
self.locs = []
self.z_state = []
self.group_index = []
self.infos = []
self.locs_paths = []
self._mode = "Zoom"
self._pan = False
self._size_hint = (768, 768)
self.n_locs = 0
self._picks = []
self.index_blocks = []
self._drift = []
# Define DisplaySettingsDialog
self.viewxy = QtWidgets.QLabel("")
self.viewxz = QtWidgets.QLabel("")
self.viewyz = QtWidgets.QLabel("")
self.viewcp = QtWidgets.QLabel("")
minsize = 512
self.viewxy.setFixedWidth(minsize)
self.viewxy.setFixedHeight(minsize)
self.viewxz.setFixedWidth(minsize)
self.viewxz.setFixedHeight(minsize)
self.viewyz.setFixedWidth(minsize)
self.viewyz.setFixedHeight(minsize)
self.viewcp.setFixedWidth(minsize)
self.viewcp.setFixedHeight(minsize)
# Define layout
display_groupbox = QtWidgets.QGroupBox("Display")
displaygrid = QtWidgets.QGridLayout(display_groupbox)
displaygrid.addWidget(QtWidgets.QLabel("XY"), 0, 0)
displaygrid.addWidget(self.viewxy, 1, 0)
displaygrid.addWidget(QtWidgets.QLabel("XZ"), 0, 1)
displaygrid.addWidget(self.viewxz, 1, 1)
displaygrid.addWidget(QtWidgets.QLabel("YZ"), 2, 0)
displaygrid.addWidget(self.viewyz, 3, 0)
displaygrid.addWidget(QtWidgets.QLabel("CP"), 2, 1)
displaygrid.addWidget(self.viewcp, 3, 1)
button_groupbox = QtWidgets.QGroupBox("Buttons")
buttongrid = QtWidgets.QGridLayout(button_groupbox)
rotation_groupbox = QtWidgets.QGroupBox("Rotation + Translation")
rotationgrid = QtWidgets.QGridLayout(rotation_groupbox)
centerofmassbtn = QtWidgets.QPushButton("Center of Mass XYZ")
axis_groupbox = QtWidgets.QGroupBox("Axis")
axisgrid = QtWidgets.QGridLayout(axis_groupbox)
self.x_axisbtn = QtWidgets.QRadioButton("X")
self.y_axisbtn = QtWidgets.QRadioButton("Y")
self.z_axisbtn = QtWidgets.QRadioButton("Z")
self.z_axisbtn.setChecked(True)
axisgrid.addWidget(self.x_axisbtn, 0, 0)
axisgrid.addWidget(self.y_axisbtn, 0, 1)
axisgrid.addWidget(self.z_axisbtn, 0, 2)
proj_groupbox = QtWidgets.QGroupBox("Projection")
projgrid = QtWidgets.QGridLayout(proj_groupbox)
self.xy_projbtn = QtWidgets.QRadioButton("XY")
self.yz_projbtn = QtWidgets.QRadioButton("YZ")
self.xz_projbtn = QtWidgets.QRadioButton("XZ")
self.xy_projbtn.setChecked(True)
projgrid.addWidget(self.xy_projbtn, 0, 0)
projgrid.addWidget(self.yz_projbtn, 0, 1)
projgrid.addWidget(self.xz_projbtn, 0, 2)
rotatebtn = QtWidgets.QPushButton("Rotate")
self.radio_sym = QtWidgets.QRadioButton("x symmetry")
self.symEdit = QtWidgets.QSpinBox()
self.symEdit.setRange(2, 100)
self.symEdit.setValue(8)
self.radio_sym_custom = QtWidgets.QRadioButton("custom symmetry")
self.symcustomEdit = QtWidgets.QLineEdit("90,180,270")
deg_groupbox = QtWidgets.QGroupBox("Degrees")
deggrid = QtWidgets.QGridLayout(deg_groupbox)
self.full_degbtn = QtWidgets.QRadioButton("Full")
self.part_degbtn = QtWidgets.QRadioButton("Part")
self.degEdit = QtWidgets.QTextEdit()
self.degEdit = QtWidgets.QSpinBox()
self.degEdit.setRange(1, 10)
self.degEdit.setValue(5)
deggrid.addWidget(self.full_degbtn, 0, 0)
deggrid.addWidget(self.part_degbtn, 0, 1)
deggrid.addWidget(self.degEdit, 0, 2)
self.full_degbtn.setChecked(True)
# Rotation Groupbox
rotationgrid.addWidget(axis_groupbox, 0, 0, 1, 2)
rotationgrid.addWidget(proj_groupbox, 1, 0, 1, 2)
rotationgrid.addWidget(deg_groupbox, 2, 0, 1, 2)
rotationgrid.addWidget(rotatebtn, 3, 0, 1, 2)
rotationgrid.addWidget(self.symEdit, 4, 0)
rotationgrid.addWidget(self.radio_sym, 4, 1)
rotationgrid.addWidget(self.radio_sym_custom, 5, 0)
rotationgrid.addWidget(self.symcustomEdit, 5, 1)
buttongrid.addWidget(centerofmassbtn, 0, 0)
buttongrid.addWidget(rotation_groupbox, 1, 0)
centerofmassbtn.clicked.connect(self.centerofmass)
rotatebtn.clicked.connect(self.rotate_groups)
self.translatebtn = QtWidgets.QCheckBox("Translate only")
self.flipbtn = QtWidgets.QCheckBox("Consider flipped structures")
self.alignxbtn = QtWidgets.QPushButton("Align X")
self.alignybtn = QtWidgets.QPushButton("Align Y")
self.alignzzbtn = QtWidgets.QPushButton("Align Z_Z")
self.alignzybtn = QtWidgets.QPushButton("Align Z_Y")
self.translatexbtn = QtWidgets.QPushButton("Translate X")
self.translateybtn = QtWidgets.QPushButton("Translate Y")
self.translatezbtn = QtWidgets.QPushButton("Translate Z")
self.rotatexy_convbtn = QtWidgets.QPushButton("Rotate XY - Convolution")
self.scorebtn = QtWidgets.QPushButton("Calculate Score")
operate_groupbox = QtWidgets.QGroupBox("Operate")
operategrid = QtWidgets.QGridLayout(operate_groupbox)
rotationgrid.addWidget(self.translatebtn, 7, 0)
rotationgrid.addWidget(self.flipbtn, 8, 0)
self.x_range = QtWidgets.QLineEdit("-3,3")
rotationgrid.addWidget(QtWidgets.QLabel("x-Range (Px)"), 9, 0)
rotationgrid.addWidget(self.x_range, 9, 1)
self.y_range = QtWidgets.QLineEdit("-3,3")
rotationgrid.addWidget(QtWidgets.QLabel("y-Range (Px)"), 10, 0)
rotationgrid.addWidget(self.y_range, 10, 1)
self.z_range = QtWidgets.QLineEdit("-1000,1000")
rotationgrid.addWidget(QtWidgets.QLabel("z-Range (nm)"), 11, 0)
rotationgrid.addWidget(self.z_range, 11, 1)
self.z_range.textChanged.connect(self.adjust_z)
self.x_range.textChanged.connect(self.adjust_xy)
self.y_range.textChanged.connect(self.adjust_xy)
operategrid.addWidget(self.alignxbtn, 0, 1)
operategrid.addWidget(self.alignybtn, 1, 1)
operategrid.addWidget(self.alignzzbtn, 2, 1)
operategrid.addWidget(self.alignzybtn, 3, 1)
operategrid.addWidget(self.translatexbtn, 0, 0)
operategrid.addWidget(self.translateybtn, 1, 0)
operategrid.addWidget(self.translatezbtn, 2, 0)
operategrid.addWidget(self.rotatexy_convbtn, 4, 0)
operategrid.addWidget(self.scorebtn, 4, 1)
self.rotatexy_convbtn.clicked.connect(self.rotatexy_convolution)
self.alignxbtn.clicked.connect(self.align_x)
self.alignybtn.clicked.connect(self.align_y)
self.alignzzbtn.clicked.connect(self.align_zz)
self.alignzybtn.clicked.connect(self.align_zy)
self.translatexbtn.clicked.connect(self.translate_x)
self.translateybtn.clicked.connect(self.translate_y)
self.translatezbtn.clicked.connect(self.translate_z)
self.scorebtn.clicked.connect(self.calculate_score)
buttongrid.addWidget(operate_groupbox, 2, 0)
self.contrastEdit = QtWidgets.QDoubleSpinBox()
self.contrastEdit.setDecimals(1)
self.contrastEdit.setRange(0, 10)
self.contrastEdit.setValue(0.5)
self.contrastEdit.setSingleStep(0.1)
self.contrastEdit.valueChanged.connect(self.updateLayout)
self.grid = QtWidgets.QGridLayout()
self.grid.addWidget(display_groupbox, 0, 0, 2, 1)
self.grid.addWidget(button_groupbox, 0, 1, 1, 1)
contrast_groupbox = QtWidgets.QGroupBox("Contrast")
contrastgrid = QtWidgets.QGridLayout(contrast_groupbox)
contrastgrid.addWidget(self.contrastEdit)
buttongrid.addWidget(contrast_groupbox)
MODEL_X_DEFAULT = "0,20,40,60,0,20,40,60,0,20,40,60"
MODEL_Y_DEFAULT = "0,20,40,0,20,40,0,20,40,0,20,40"
MODEL_Z_DEFAULT = "0,0,0,0,0,0,0,0,0,0,0,0"
self.modelchk = QtWidgets.QCheckBox("Use Model")
self.model_x = QtWidgets.QLineEdit(MODEL_X_DEFAULT)
self.model_y = QtWidgets.QLineEdit(MODEL_Y_DEFAULT)
self.model_z = QtWidgets.QLineEdit(MODEL_Z_DEFAULT)
self.model_preview_btn = QtWidgets.QPushButton("Preview")
self.model_preview_btn.clicked.connect(self.model_preview)
self.modelblurEdit = QtWidgets.QDoubleSpinBox()
self.modelblurEdit.setDecimals(1)
self.modelblurEdit.setRange(0, 10)
self.modelblurEdit.setValue(0.5)
self.modelblurEdit.setSingleStep(0.1)
self.pixelsizeEdit = QtWidgets.QSpinBox()
self.pixelsizeEdit.setRange(1, 999)
self.pixelsizeEdit.setValue(130)
model_groupbox = QtWidgets.QGroupBox("Model")
modelgrid = QtWidgets.QGridLayout(model_groupbox)
modelgrid.addWidget(self.modelchk, 0, 0)
modelgrid.addWidget(QtWidgets.QLabel("X-Coordinates"), 1, 0)
modelgrid.addWidget(self.model_x, 1, 1)
modelgrid.addWidget(QtWidgets.QLabel("Y-Coordinates"), 2, 0)
modelgrid.addWidget(self.model_y, 2, 1)
modelgrid.addWidget(QtWidgets.QLabel("Z-Coordinates"), 3, 0)
modelgrid.addWidget(self.model_z, 3, 1)
modelgrid.addWidget(QtWidgets.QLabel("Blur:"), 4, 0)
modelgrid.addWidget(self.modelblurEdit, 4, 1)
modelgrid.addWidget(QtWidgets.QLabel("Pixelsize:"), 5, 0)
modelgrid.addWidget(self.pixelsizeEdit, 5, 1)
modelgrid.addWidget(self.model_preview_btn, 6, 0)
modelgrid.addWidget(self.modelchk, 6, 1)
buttongrid.addWidget(model_groupbox)
mainWidget = QtWidgets.QWidget()
mainWidget.setLayout(self.grid)
self.setCentralWidget(mainWidget)
self.status_bar.showMessage("Average3 ready.")
def open(self):
path, exe = QtWidgets.QFileDialog.getOpenFileName(
self, "Open localizations", filter="*.hdf5"
)
if path:
self.add(path)
def save(self, path):
n_channels = len(self.locs)
for i in range(n_channels):
cx = self.infos[i][0]["Width"] / 2
cy = self.infos[i][0]["Height"] / 2
out_locs = self.locs[i].copy()
out_locs.x += cx
out_locs.y += cy
info = self.infos[i] + [{"Generated by": "Picasso Average3"}]
if not self.z_state[i]:
out_locs = lib.remove_from_rec(out_locs, "z")
out_path = os.path.splitext(self.locs_paths[i])[0] + "_avg3.hdf5"
path, exe = QtWidgets.QFileDialog.getSaveFileName(
self, "Save localizations", out_path, filter="*.hdf5"
)
io.save_locs(path, out_locs, info)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == ".hdf5":
print("Opening {} ..".format(path))
self.add(path)
def add(self, path, rendermode=True):
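        # Load a picked-localizations HDF5 file, center its coordinates on the FOV,
        # build the per-group index, and register the dataset with the GUI.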
try:
locs, info = io.load_locs(path, qt_parent=self)
except io.NoMetadataFileError:
return
if len(self.locs) == 0:
self.pixelsize = 0
if not hasattr(locs, "group"):
msgBox = QtWidgets.QMessageBox(self)
msgBox.setWindowTitle("Error")
msgBox.setText(
("Datafile does not contain group information."
" Please load file with picked localizations.")
)
msgBox.exec_()
else:
locs = lib.ensure_sanity(locs, info)
if not hasattr(locs, "z"):
locs = lib.append_to_rec(locs, locs.x.copy(), "z")
self.pixelsize = 1
has_z = False
else:
has_z = True
if self.pixelsize == 0:
pixelsize, ok = QtWidgets.QInputDialog.getInt(
self,
"Pixelsize Dialog",
"Please enter the pixelsize in nm",
130,
)
if ok:
self.pixelsize = pixelsize
else:
self.pixelsize = 130
self.locs.append(locs)
self.z_state.append(has_z)
self.infos.append(info)
self.locs_paths.append(path)
self.index_blocks.append(None)
self._drift.append(None)
self.dataset_dialog.add_entry(path)
self.dataset_dialog.checks[-1].stateChanged.connect(
self.updateLayout
)
cx = self.infos[-1][0]["Width"] / 2
cy = self.infos[-1][0]["Height"] / 2
self.locs[-1].x -= cx
self.locs[-1].y -= cy
if len(self.locs) == 1:
self.median_lp = np.mean(
[np.median(locs.lpx), np.median(locs.lpy)]
)
if hasattr(locs, "group"):
groups = np.unique(locs.group)
groupcopy = locs.group.copy()
for i in range(len(groups)):
groupcopy[locs.group == groups[i]] = i
np.random.shuffle(groups)
groups %= N_GROUP_COLORS
self.group_color = groups[groupcopy]
                if rendermode:
self.fit_in_view(autoscale=True)
else:
                if rendermode:
self.update_scene()
self.oversampling = 1
if len(self.locs) == 1:
self.t_min = np.min([np.min(locs.x), np.min(locs.y)])
self.t_max = np.max([np.max(locs.x), np.max(locs.y)])
self.z_min = np.min(locs.z)
self.z_max = np.max(locs.z)
else:
self.t_min = np.min(
[np.min(locs.x), np.min(locs.y), self.t_min]
)
self.t_max = np.max(
[np.max(locs.x), np.max(locs.y), self.t_max]
)
self.z_min = np.min([np.min(locs.z), self.z_min])
                self.z_max = np.max([np.max(locs.z), self.z_max])
if len(self.locs) == 1:
print("Dataset loaded from {}.".format(path))
else:
print(
("Dataset loaded from {},"
" Total number of datasets {}.").format(
path, len(self.locs)
)
)
# CREATE GROUP INDEX
if hasattr(locs, "group"):
groups = np.unique(locs.group)
n_groups = len(groups)
n_locs = len(locs)
group_index = scipy.sparse.lil_matrix(
                (n_groups, n_locs), dtype=bool
)
progress = lib.ProgressDialog(
"Creating group index", 0, len(groups), self
)
progress.set_value(0)
for i, group in enumerate(groups):
index = np.where(locs.group == group)[0]
group_index[i, index] = True
progress.set_value(i + 1)
self.group_index.append(group_index)
self.n_groups = n_groups
os.chdir(os.path.dirname(path))
self.calculate_radii()
self.oversampling = 4
self.updateLayout()
def updateLayout(self):
if len(self.locs) > 0:
pixmap1, pixmap2, pixmap3 = self.hist_multi_channel(self.locs)
self.viewxy.setPixmap(pixmap1)
self.viewxz.setPixmap(pixmap2)
self.viewyz.setPixmap(pixmap3)
def centerofmass_all(self):
# Align all by center of mass
n_channels = len(self.locs)
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
# stack arrays
sel_locs_x = self.locs[j].x
sel_locs_y = self.locs[j].y
sel_locs_z = self.locs[j].z
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
out_locs_x = stack_arrays(out_locs_x, asrecarray=True, usemask=False)
out_locs_y = stack_arrays(out_locs_y, asrecarray=True, usemask=False)
out_locs_z = stack_arrays(out_locs_z, asrecarray=True, usemask=False)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
self.locs[j].x -= mean_x
self.locs[j].y -= mean_y
self.locs[j].z -= mean_z
def calculate_radii(self):
# CALCULATE PROPER R VALUES
n_channels = len(self.locs)
self.r = 0
self.r_z = 0
for j in range(n_channels):
self.r = np.max(
[
3
* np.sqrt(
np.mean(self.locs[j].x ** 2 + self.locs[j].y ** 2)
),
self.r,
]
)
self.r_z = np.max(
[5 * np.sqrt(np.mean(self.locs[j].z ** 2)), self.r_z]
)
self.t_min = -self.r
self.t_max = self.r
self.z_min = -self.r_z
self.z_max = self.r_z
self.z_min_load = self.z_min.copy()
self.z_max_load = self.z_max.copy()
def centerofmass(self):
print("Aligning by center of mass.. ", end="", flush=True)
n_groups = self.n_groups
n_channels = len(self.locs)
progress = lib.ProgressDialog(
"Aligning by center of mass", 0, n_groups, self
)
progress.set_value(0)
for i in range(n_groups):
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
index = self.group_index[j][i, :].nonzero()[1]
# stack arrays
sel_locs_x = self.locs[j].x[index]
sel_locs_y = self.locs[j].y[index]
sel_locs_z = self.locs[j].z[index]
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
progress.set_value(i + 1)
out_locs_x = stack_arrays(
out_locs_x, asrecarray=True, usemask=False
)
out_locs_y = stack_arrays(
out_locs_y, asrecarray=True, usemask=False
)
out_locs_z = stack_arrays(
out_locs_z, asrecarray=True, usemask=False
)
mean_x = np.mean(out_locs_x)
            mean_y = np.mean(out_locs_y)
            mean_z = np.mean(out_locs_z)
            # Assumed completion: shift each channel's localizations in this group
            # to the group's center of mass (mirrors centerofmass_all above).
            for j in range(n_channels):
                index = self.group_index[j][i, :].nonzero()[1]
                self.locs[j].x[index] -= mean_x
                self.locs[j].y[index] -= mean_y
                self.locs[j].z[index] -= mean_z
'''
Usage 1: python3 split_and_run.py --dataset [dataset name] --num_split [# of split] --metric [distance measure] --num_leaves [num_leaves] --num_search [num_leaves_to_search] --coarse_training_size [coarse training sample size] --fine_training_size [fine training sample size] --threshold [threshold] --reorder [reorder size] [--split] [--eval_split]
Usage 2: python3 split_and_run.py --dataset [dataset name] --groundtruth --metric [distance measure]
'''
import sys
import numpy as np
import time
import argparse
import os
import h5py
import math
import struct  # needed by write_fbin / write_ibin below
parser = argparse.ArgumentParser(description='Options')
parser.add_argument('--program', type=str, help='scann, faiss ...')
parser.add_argument('--dataset', type=str, default=None, help='sift1b, glove ...')
parser.add_argument('--num_split', type=int, default=-1, help='# of splits')
parser.add_argument('--metric', type=str, default=None, help='dot_product, squared_l2')
## Common algorithm parameters
parser.add_argument('--L', type=int, default=-1, help='# of coarse codewords')
parser.add_argument('--w', type=int, default=-1, help='# of clusters to search')
parser.add_argument('--m', type=int, default=-1, help='# of dimension chunks')
parser.add_argument('--batch', type=int, default=1, help='query batch size')
parser.add_argument('--csize', type=int, default=10000, help='query size in fast scan cache')
## ScaNN parameters
parser.add_argument('--coarse_training_size', type=int, default=250000, help='coarse training sample size')
parser.add_argument('--fine_training_size', type=int, default=100000, help='fine training sample size')
parser.add_argument('--threshold', type=float, default=0.2, help='anisotropic_quantization_threshold')
parser.add_argument('--reorder', type=int, default=-1, help='reorder size')
## Faiss parameters
parser.add_argument('--k_star', type=int, default=-1, help='# of a single finegrained codewords')
parser.add_argument('--is_gpu', action='store_true')
parser.add_argument('--opq', type=int, default=-1, help='new desired dimension after applying OPQ')
parser.add_argument('--sq', type=int, default=-1, help='desired amount of bits per component after SQ')
parser.add_argument('--flat', type=int, default=-1, help='1 if you want to perform exhaustive search')
## Annoy parameters
parser.add_argument('--n_trees', type=int, default=-1, help='# of trees')
## ScaNN & Annoy common parameters
parser.add_argument('--num_search', type=int, default=-1, help='# of searching leaves for ScaNN, # of searching datapoints for Annoy')
parser.add_argument('--topk', type=int, default=-1, help='# of final result')
## Run options
parser.add_argument('--split', action='store_true')
parser.add_argument('--eval_split', action='store_true')
parser.add_argument('--groundtruth', action='store_true')
parser.add_argument('--sweep', action='store_true')
args = parser.parse_args()
assert args.dataset != None and args.topk <= 1000
if args.split != True:
assert args.metric == "squared_l2" or args.metric == "dot_product" or args.metric=="angular"
if args.eval_split or args.sweep:
assert args.program!=None and args.metric!=None and args.num_split!=-1 and args.topk!=-1
if args.groundtruth:
import ctypes
assert args.metric!=None
if args.program=='scann':
import scann
assert args.is_gpu == False and (args.topk <= args.reorder if args.reorder!=-1 else True)
if args.sweep == False:
assert args.L!=-1 and args.w!=-1 and args.topk!=-1 and args.k_star == -1 and args.m!=-1
assert args.topk!=-1
elif args.program == "faiss":
#if os.environ.get('LD_PRELOAD') == None:
# assert False, "Please set LD_PRELOAD environment path and retry"
# export LD_PRELOAD=/opt/intel/mkl/lib/intel64/libmkl_def.so:/opt/intel/mkl/lib/intel64/libmkl_avx2.so:/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_intel_lp64.so:/opt/intel/mkl/lib/intel64/libmkl_intel_thread.so:/opt/intel/lib/intel64_lin/libiomp5.so
from runfaiss import build_faiss, faiss_search, check_cached, faiss_search_flat
import math
if args.sweep == False:
assert args.L!=-1 and args.k_star!=-1 and args.w!=-1 and args.m!=-1
elif args.program == "annoy":
import annoy
if args.batch > 1:
from multiprocessing.pool import ThreadPool
assert args.topk!=-1 and args.is_gpu==False and (args.num_search!=-1 and args.n_trees!=-1 if args.sweep!=True else True)
def compute_recall(neighbors, true_neighbors):
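    # Overall recall: fraction of ground-truth neighbors found anywhere in the
    # corresponding returned neighbor lists.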
total = 0
for gt_row, row in zip(true_neighbors, neighbors):
# print("SHAPE =", np.shape(np.intersect1d(gt_row, row)))
total += np.intersect1d(gt_row, row).shape[0]
return total / true_neighbors.size
def compute_more_recalls(neighbors, true_neighbors, target, base):
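    # Recall target@base: fraction of the top-`target` ground-truth neighbors that
    # appear within the top-`base` returned neighbors, averaged over all queries.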
total = 0
trimmed_neighbors = neighbors[:,:base]
trimmed_gt = true_neighbors[:,:target]
num_queries, _ = np.shape(trimmed_gt)
# print("trimmed_neighbors shape =", np.shape(trimmed_neighbors))
# print("trimmed_gt shape =", np.shape(trimmed_gt))
for i in range(num_queries):
curr_neighbors_row = trimmed_neighbors[i]
curr_gt_row = trimmed_gt[i]
for curr_gt_elem in curr_gt_row:
if curr_gt_elem in curr_neighbors_row:
total += 1
return total / trimmed_gt.size
def print_more_recalls(final_neighbors, gt):
print("final_neighbors :", final_neighbors.shape)
print("gt :", gt.shape)
top1_10 = compute_more_recalls(final_neighbors, gt, 1, 10)
top1_100 = compute_more_recalls(final_neighbors, gt, 1, 100)
top10_100 = compute_more_recalls(final_neighbors, gt, 10, 100)
top1_1000 = compute_more_recalls(final_neighbors, gt, 1, 1000)
top10_1000 = compute_more_recalls(final_neighbors, gt, 10, 1000)
top100_1000 = compute_more_recalls(final_neighbors, gt, 100, 1000)
print("Recall 1@10:", top1_10)
print("Recall 1@100:", top1_100)
print("Recall 10@100:", top10_100)
print("Recall 1@1000:", top1_1000)
print("Recall 10@1000:", top10_1000)
print("Recall 100@1000:", top100_1000)
return top1_10, top1_100, top10_100, top1_1000, top10_1000, top100_1000
def ivecs_read(fname):
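    # .ivecs format: each vector is an int32 dimension d followed by d int32 values.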
a = np.fromfile(fname, dtype='int32')
d = a[0]
return a.reshape(-1, d + 1)[:, 1:]
def ivecs_write(fname, m):
n, d = m.shape
dimension_arr = np.zeros((n, 1), dtype=np.int32)
dimension_arr[:, 0] = d
m = np.append(dimension_arr, m, axis=1)
m.tofile(fname)
def bvecs_mmap(fname, offset_=None, shape_=None):
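    # .bvecs format: a 4-byte int32 dimension header followed by d uint8 components.
    # The offset/shape path hard-codes 132-byte records (4 + 128), i.e. 128-dim SIFT.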
if offset_!=None and shape_!=None:
x = np.memmap(fname, dtype=np.uint8, mode='r', offset=offset_*132, shape=(shape_*132))
else:
x = np.memmap(fname, dtype=np.uint8, mode='r')
d = x[:4].view('int32')[0]
return x.reshape(-1, d + 4)[:, 4:]
def bvecs_write(fname, m):
n, d = m.shape
dimension_arr = np.zeros((n, 4), dtype=np.uint8)
dimension_arr[:, 0] = d
m = np.append(dimension_arr, m, axis=1)
m.tofile(fname)
def bvecs_read(fname):
b = np.fromfile(fname, dtype=np.uint8)
d = b[:4].view('int32')[0]
return b.reshape(-1, d+4)[:, 4:]
def mmap_fvecs(fname, offset_=None, shape_=None):
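    # .fvecs format: an int32 dimension header followed by d float32 components.
    # D is assumed to be a module-level dimensionality constant defined elsewhere
    # in this script; it is needed to compute byte offsets for partial reads.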
if offset_!=None and shape_!=None:
x = np.memmap(fname, dtype='int32', mode='r', offset=(offset_*(D+1)*4), shape=(shape_*(D+1)))
else:
x = np.memmap(fname, dtype='int32', mode='r')
d = x[0]
return x.reshape(-1, d + 1)[:, 1:].view(np.float32)
# Use for synthetic billion dataset
def mmap_fvecs2(fname, offset_=None, shape_=None):
if offset_!=None and shape_!=None:
x = np.memmap(fname, dtype='float16', mode='r', offset=(offset_*(D+1)*2), shape=(shape_*(D+1)))
else:
x = np.memmap(fname, dtype='float16', mode='r')
d = int(x[0])
return x.reshape(-1, d + 1)[:, 1:].view(np.float16)
def fvecs_write(fname, m):
m = m.astype('float32')
n, d = m.shape
m1 = np.empty((n, d + 1), dtype='int32')
m1[:, 0] = d
m1[:, 1:] = m.view('int32')
m1.tofile(fname)
def txt_to_fvecs(fname):
txt_arr = np.loadtxt(fname)
if "_clognormal" in fname:
if "query" in fname:
fvecs_write(dataset_basedir + "clognormal1m_query.fvecs", txt_arr)
else:
fvecs_write(dataset_basedir + "clognormal1m_base.fvecs", txt_arr)
elif "_cnormal" in fname:
if "query" in fname:
fvecs_write(dataset_basedir + "cnormal1m_query.fvecs", txt_arr)
else:
fvecs_write(dataset_basedir + "cnormal1m_base.fvecs", txt_arr)
elif "_lognormal" in fname:
if "query" in fname:
fvecs_write(dataset_basedir + "lognormal1m_query.fvecs", txt_arr)
else:
fvecs_write(dataset_basedir + "lognormal1m_base.fvecs", txt_arr)
elif "_normal" in fname:
if "query" in fname:
fvecs_write(dataset_basedir + "normal1m_query.fvecs", txt_arr)
else:
fvecs_write(dataset_basedir + "normal1m_base.fvecs", txt_arr)
def read_fbin(filename, start_idx=0, chunk_size=None):
""" Read *.fbin file that contains float32 vectors
Args:
:param filename (str): path to *.fbin file
:param start_idx (int): start reading vectors from this index
:param chunk_size (int): number of vectors to read.
If None, read all vectors
Returns:
Array of float32 vectors (numpy.ndarray)
"""
with open(filename, "rb") as f:
nvecs, dim = np.fromfile(f, count=2, dtype=np.int32)
nvecs = (nvecs - start_idx) if chunk_size is None else chunk_size
arr = np.fromfile(f, count=nvecs * dim, dtype=np.float32,
offset=start_idx * 4 * dim)
return arr.reshape(nvecs, dim)
def read_ibin(filename, start_idx=0, chunk_size=None):
""" Read *.ibin file that contains int32 vectors
Args:
:param filename (str): path to *.ibin file
:param start_idx (int): start reading vectors from this index
:param chunk_size (int): number of vectors to read.
If None, read all vectors
Returns:
Array of int32 vectors (numpy.ndarray)
"""
with open(filename, "rb") as f:
nvecs, dim = np.fromfile(f, count=2, dtype=np.int32)
nvecs = (nvecs - start_idx) if chunk_size is None else chunk_size
arr = np.fromfile(f, count=nvecs * dim, dtype=np.int32,
offset=start_idx * 4 * dim)
return arr.reshape(nvecs, dim)
def write_fbin(filename, vecs):
""" Write an array of float32 vectors to *.fbin file
    Args:
:param filename (str): path to *.fbin file
:param vecs (numpy.ndarray): array of float32 vectors to write
"""
assert len(vecs.shape) == 2, "Input array must have 2 dimensions"
with open(filename, "wb") as f:
nvecs, dim = vecs.shape
f.write(struct.pack('<i', nvecs))
f.write(struct.pack('<i', dim))
vecs.astype('float32').flatten().tofile(f)
def write_ibin(filename, vecs):
""" Write an array of int32 vectors to *.ibin file
Args:
:param filename (str): path to *.ibin file
:param vecs (numpy.ndarray): array of int32 vectors to write
"""
assert len(vecs.shape) == 2, "Input array must have 2 dimensions"
with open(filename, "wb") as f:
nvecs, dim = vecs.shape
f.write(struct.pack('<i', nvecs))
f.write(struct.pack('<i', dim))
vecs.astype('int32').flatten().tofile(f)
def read_data(dataset_path, offset_=None, shape_=None, base=True):
if "sift1m" in args.dataset:
file = dataset_path + "sift_base.fvecs" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif "deep1m" in args.dataset:
file = dataset_path + "deep1m_base.fvecs" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif "deepm96" in args.dataset:
file = dataset_path + "deepm96_base.fvecs" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif "test" in args.dataset:
file = dataset_path + "test.fvecs" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif args.dataset == "clognormal1m":
file = dataset_path + "clognormal1m_base.fvecs" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif args.dataset == "cnormal1m":
file = dataset_path + "cnormal1m_base.fvecs" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif args.dataset == "lognormal1m":
file = dataset_path + "lognormal1m_base.fvecs" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif args.dataset == "normal1m":
file = dataset_path + "normal1m_base.fvecs" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif args.dataset == "clognormal1b":
file = dataset_path + "1000000000_128_clognormal.txt" if base else dataset_path
return mmap_fvecs2(file, offset_=offset_, shape_=shape_) if base else mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif args.dataset == "cnormal1b":
file = dataset_path + "1000000000_128_cnormal.txt" if base else dataset_path
return mmap_fvecs2(file, offset_=offset_, shape_=shape_) if base else mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif args.dataset == "lognormal1b":
file = dataset_path + "1000000000_128_lognormal.txt" if base else dataset_path
return mmap_fvecs2(file, offset_=offset_, shape_=shape_) if base else mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif args.dataset == "normal1b":
file = dataset_path + "1000000000_128_normal.txt" if base else dataset_path
return mmap_fvecs2(file, offset_=offset_, shape_=shape_) if base else mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif "music1m" in args.dataset:
# file = dataset_path + "database_music100.bin" if base else dataset_path
# return np.fromfile(file, dtype = np.float32).reshape(N, D)
file = dataset_path + "split_data/music1m_1_0" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif "gist" in args.dataset:
file = dataset_path + "gist_base.fvecs" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif "sift1b" in args.dataset:
file = dataset_path+"bigann_base.bvecs" if base else dataset_path
return bvecs_mmap(file, offset_=offset_, shape_=shape_)
elif "deep1b" in args.dataset:
file = dataset_path+"deep1B_base.fvecs" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif "tti1m" in args.dataset:
file = dataset_path+"/split_data/tti1m_1_0" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif "tti1b" in args.dataset:
file = dataset_path+"base.1B.fbin" if base else dataset_path
return mmap_fvecs(file, offset_=offset_, shape_=shape_)
elif "glove" in args.dataset:
file = dataset_path+"glove-100-angular.hdf5" if base else dataset_path
if base:
dataset = h5py.File(file, "r")
dataset = np.array(dataset['train'], dtype='float32')
if args.metric == "dot_product":
dataset = dataset / np.linalg.norm(dataset, axis=1)[:, np.newaxis]
if offset_!=None and shape_!=None:
return dataset[offset_:offset_+shape_]
else:
return dataset
else:
dataset = h5py.File(dataset_path, "r")
            # Assumed completion: split glove files are expected to store their
            # vectors under an HDF5 key named "dataset"; adjust if the split
            # writer uses a different key.
            return np.array(dataset["dataset"], dtype="float32")
# -*- coding: utf-8 -*-
"""Proximity Forest time series classifier
a decision tree forest which uses distance measures to partition data.
<NAME> and <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME> and <NAME>
Proximity Forest: an effective and scalable distance-based classifier for
time series,
Data Mining and Knowledge Discovery, 33(3): 607-635, 2019
"""
# linkedin.com/goastler; github.com/goastler
__author__ = ["<NAME>"]
__all__ = ["ProximityForest", "_CachedTransformer", "ProximityStump", "ProximityTree"]
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from scipy import stats
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import normalize
from sklearn.utils import check_random_state
from sktime.classification.base import BaseClassifier
from sktime.distances.elastic_cython import dtw_distance
from sktime.distances.elastic_cython import erp_distance
from sktime.distances.elastic_cython import lcss_distance
from sktime.distances.elastic_cython import msm_distance
from sktime.distances.elastic_cython import twe_distance
from sktime.distances.elastic_cython import wdtw_distance
from sktime.transformers.base import _PanelToPanelTransformer
from sktime.transformers.panel.summarize import DerivativeSlopeTransformer
from sktime.utils import comparison
from sktime.utils import dataset_properties
from sktime.utils.data_container import from_nested_to_2d_array
from sktime.utils.validation.panel import check_X
from sktime.utils.validation.panel import check_X_y
# todo unit tests / sort out current unit tests
# todo logging package rather than print to screen
# todo get params avoid func pointer - use name
# todo set params use func name or func pointer
# todo constructor accept str name func / pointer
# todo duck-type functions
class _CachedTransformer(_PanelToPanelTransformer):
"""Transformer container that transforms data and adds the transformed
version to a cache.
If the transformation is called again on already seen data the data is
fetched
from the cache rather than performing the expensive transformation.
Parameters
----------
transformer : the transformer to transform uncached data
Attributes
----------
cache : location to store transforms seen before for fast look up
"""
_required_parameters = ["transformer"]
def __init__(self, transformer):
self.cache = {}
self.transformer = transformer
super(_CachedTransformer, self).__init__()
def clear(self):
"""
clear the cache
"""
self.cache = {}
def transform(self, X, y=None):
"""
Fit transformer, creating a cache for transformation.
Parameters
----------
X : pandas DataFrame of shape [n_instances, n_features]
Input data
y : pandas Series, shape (n_instances), optional
Targets for supervised learning.
Returns
-------
cached_instances.
"""
# for each instance, get transformed instance from cache or
# transform and add to cache
cached_instances = {}
uncached_indices = []
for index in X.index.values:
try:
cached_instances[index] = self.cache[index]
except Exception:
uncached_indices.append(index)
if len(uncached_indices) > 0:
uncached_instances = X.loc[uncached_indices, :]
transformed_uncached_instances = self.transformer.fit_transform(
uncached_instances
)
transformed_uncached_instances.index = uncached_instances.index
transformed_uncached_instances = transformed_uncached_instances.to_dict(
"index"
)
self.cache.update(transformed_uncached_instances)
cached_instances.update(transformed_uncached_instances)
cached_instances = pd.DataFrame.from_dict(cached_instances, orient="index")
return cached_instances
def __str__(self):
return self.transformer.__str__()
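# Illustrative usage sketch for _CachedTransformer (not part of the original
# module; `X_train` is a hypothetical nested sktime DataFrame). The cache pays
# off when many trees transform the same instances repeatedly:
#
#     cached_der = _CachedTransformer(DerivativeSlopeTransformer())
#     first = cached_der.transform(X_train)   # transforms and caches per-instance results
#     second = cached_der.transform(X_train)  # served from the cache, no recomputation
#     cached_der.clear()                      # drop the cache to free memory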
def _derivative_distance(distance_measure, transformer):
"""
take derivative before conducting distance measure
:param distance_measure: the distance measure to use
:param transformer: the transformer to use
:return: a distance measure function with built in transformation
"""
def distance(instance_a, instance_b, **params):
df = pd.DataFrame([instance_a, instance_b])
df = transformer.transform(X=df)
instance_a = df.iloc[0, :]
instance_b = df.iloc[1, :]
return distance_measure(instance_a, instance_b, **params)
return distance
def distance_predefined_params(distance_measure, **params):
"""
conduct distance measurement with a predefined set of parameters
:param distance_measure: the distance measure to use
:param params: the parameters to use in the distance measure
:return: a distance measure with no parameters
"""
def distance(instance_a, instance_b):
return distance_measure(instance_a, instance_b, **params)
return distance
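# Illustrative sketch (not in the original module): distance_predefined_params
# simply curries keyword arguments into a two-argument callable. With a
# hypothetical measure `my_distance(a, b, window=...)`:
#
#     fixed = distance_predefined_params(my_distance, window=0.1)
#     d = fixed(instance_a, instance_b)  # same as my_distance(instance_a, instance_b, window=0.1)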
def cython_wrapper(distance_measure):
"""
wrap a distance measure in cython conversion (to 1 column per dimension
format)
:param distance_measure: distance measure to wrap
:return: a distance measure which automatically formats data for cython
distance measures
"""
def distance(instance_a, instance_b, **params):
# find distance
instance_a = from_nested_to_2d_array(
instance_a, return_numpy=True
) # todo use specific
# dimension rather than whole
# thing?
instance_b = from_nested_to_2d_array(
instance_b, return_numpy=True
) # todo use specific
# dimension rather than whole thing?
instance_a = np.transpose(instance_a)
instance_b = np.transpose(instance_b)
return distance_measure(instance_a, instance_b, **params)
return distance
def pure(y):
"""
test whether a set of class labels are pure (i.e. all the same)
----
Parameters
----
y : 1d array like
array of class labels
----
Returns
----
result : boolean
whether the set of class labels is pure
"""
# get unique class labels
unique_class_labels = np.unique(np.array(y))
# if more than 1 unique then not pure
return len(unique_class_labels) <= 1
def gini_gain(y, y_subs):
"""
get gini score of a split, i.e. the gain from parent to children
----
Parameters
----
y : 1d array like
array of class labels at parent
y_subs : list of 1d array like
list of array of class labels, one array per child
----
Returns
----
score : float
gini score of the split from parent class labels to children. Note a
higher score means better gain,
i.e. a better split
"""
y = np.array(y)
# find number of instances overall
parent_n_instances = y.shape[0]
# if parent has no instances then is pure
if parent_n_instances == 0:
for child in y_subs:
if len(child) > 0:
raise ValueError("children populated but parent empty")
return 0.5
# find gini for parent node
score = gini(y)
    # subtract the weighted gini score of each child from the parent's score
for index in range(len(y_subs)):
child_class_labels = y_subs[index]
# ignore empty children
if len(child_class_labels) > 0:
# find gini score for this child
child_score = gini(child_class_labels)
# weight score by proportion of instances at child compared to
# parent
child_size = len(child_class_labels)
child_score *= child_size / parent_n_instances
            # subtract the weighted child impurity from the parent's score
score -= child_score
return score
def gini(y):
"""
get gini score at a specific node
----
Parameters
----
y : 1d numpy array
array of class labels
----
Returns
----
score : float
gini score for the set of class labels (i.e. how pure they are). A
larger score means more impurity. Zero means
pure.
"""
    y = np.array(y)
    if y.shape[0] == 0:
        raise ValueError("gini called on an empty set of class labels")
    # count the occurrences of each class label
    _, class_counts = np.unique(y, return_counts=True)
    # standard gini impurity: 1 minus the sum of squared class proportions
    proportions = class_counts / y.shape[0]
    return 1 - np.sum(proportions ** 2)
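# Illustrative, runnable sketch (not part of the original module) of how the two
# impurity helpers above behave: a parent node with labels [0, 0, 1, 1] has a
# gini impurity of 0.5, and splitting it into the two pure children [0, 0] and
# [1, 1] recovers all of that impurity as gain.
def _example_gini_gain():
    parent = [0, 0, 1, 1]
    children = [[0, 0], [1, 1]]
    assert gini(parent) == 0.5
    assert gini_gain(parent, children) == 0.5  # 0.5 (parent) - 0 (weighted pure children)
    return gini_gain(parent, children)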
from hazel.chromosphere import Hazel_atmosphere
from hazel.photosphere import SIR_atmosphere
from hazel.parametric import Parametric_atmosphere
from hazel.stray import Straylight_atmosphere
from hazel.configuration import Configuration
from hazel.io import Generic_output_file
from collections import OrderedDict
from hazel.codes import hazel_code, sir_code
from hazel.spectrum import Spectrum
from hazel.transforms import transformed_to_physical, physical_to_transformed, jacobian_transformation
import hazel.util
import numpy as np
import copy
import os
from pathlib import Path
import scipy.stats
import scipy.special
import scipy.signal
import scipy.linalg
import scipy.optimize
import warnings
import logging
import sys
__all__ = ['Model']
class Model(object):
def __init__(self, config=None, working_mode='synthesis', verbose=0, debug=False, rank=0, randomization=None, root=''):
np.random.seed(123)
if (rank != 0):
return
self.photospheres = []
self.chromospheres = []
self.chromospheres_order = []
self.atmospheres = {}
self.order_atmospheres = []
self.straylight = []
self.parametric = []
self.spectrum = []
self.configuration = None
self.n_cycles = 1
self.spectrum = {}
self.topologies = []
self.straylights = []
self.working_mode = working_mode
self.pixel = 0
self.debug = debug
self.use_analytical_RF_if_possible = False
self.nlte_available = False
self.use_nlte = False
self.root = root
self.epsilon = 1e-2
self.svd_tolerance = 1e-8
self.step_limiter_inversion = 1.0
self.backtracking = 'brent'
self.verbose = verbose
self.logger = logging.getLogger("model")
self.logger.setLevel(logging.DEBUG)
self.logger.handlers = []
ch = logging.StreamHandler()
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
self.logger.addHandler(ch)
# Set randomization
if (randomization is None):
self.n_randomization = 1
else:
self.n_randomization = randomization
if (self.verbose >= 1):
self.logger.info('Hazel2 v1.0')
if ('torch' in sys.modules and 'torch_geometric' in sys.modules):
if (self.verbose >= 1):
self.logger.info('PyTorch and PyTorch Geometric found. NLTE for Ca II is available')
self.nlte_available = True
if (config is not None):
if (self.verbose >= 1):
self.logger.info('Using configuration from file : {0}'.format(config))
self.configuration = Configuration(config)
self.use_configuration(self.configuration.config_dict)
# Initialize pyhazel
hazel_code._init()
def __getstate__(self):
d = self.__dict__.copy()
if 'logger' in d:
d['logger'] = d['logger'].name
return d
def __setstate__(self, d):
if 'logger' in d:
d['logger'] = logging.getLogger(d['logger'])
self.__dict__.update(d)
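    # Illustrative note (not part of the original code): the two methods above
    # exist because the attached logging handlers (and their locks) can make a
    # Model hard to pickle, e.g. when it is shipped to MPI or multiprocessing
    # workers. Only the logger *name* is stored on pickling and the logger is
    # looked up again on restore. A hypothetical round trip:
    #
    #     import pickle
    #     clone = pickle.loads(pickle.dumps(model))  # clone.logger is re-created by name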
def __str__(self):
tmp = ''
for l, par in self.__dict__.items():
if (l != 'LINES'):
tmp += '{0}: {1}\n'.format(l, par)
return tmp
def use_configuration(self, config_dict):
"""
Use a configuration file
Parameters
----------
config_dict : dict
Dictionary containing all the options from the configuration file previously read
Returns
-------
None
"""
# Deal with the spectral regions
tmp = config_dict['spectral regions']
# Output file
self.output_file = config_dict['working mode']['output file']
# Backtracking mode
if ('backtracking' in config_dict['working mode']):
self.backtracking = config_dict['working mode']['backtracking']
else:
self.backtracking = 'brent'
if (self.verbose >= 1):
self.logger.info('Backtracking mode : {0}'.format(self.backtracking))
# Working mode
# self.working_mode = config_dict['working mode']['action']
# Add spectral regions
for key, value in config_dict['spectral regions'].items():
self.add_spectral(value)
# Set number of cycles if present
if (self.working_mode == 'inversion'):
if ('number of cycles' in config_dict['working mode']):
if (config_dict['working mode']['number of cycles'] != 'None'):
self.n_cycles = int(config_dict['working mode']['number of cycles'])
if (self.verbose >= 1):
self.logger.info('Using {0} cycles'.format(self.n_cycles))
# Use analytical RFs if possible
if ('analytical rf if possible' in config_dict['working mode']):
if (config_dict['working mode']['analytical rf if possible'] != 'None'):
self.use_analytical_RF_if_possible = hazel.util.tobool(config_dict['working mode']['analytical rf if possible'])
else:
self.use_analytical_RF_if_possible = False
else:
self.use_analytical_RF_if_possible = False
if (self.verbose >= 1):
self.logger.info('Using analytical RFs if possible : {0}'.format(self.use_analytical_RF_if_possible))
# Set number of maximum iterations
if ('maximum iterations' in config_dict['working mode']):
            if (config_dict['working mode']['maximum iterations'] != 'None'):
self.max_iterations = int(config_dict['working mode']['maximum iterations'])
else:
self.max_iterations = 10
else:
self.max_iterations = 10
if (self.verbose >= 1):
self.logger.info('Using {0} max. iterations'.format(self.max_iterations))
# Randomization
if (self.verbose >= 1):
if (self.n_randomization == 1):
self.logger.info('Not using randomizations')
else:
self.logger.info('Using a maximum of {0} randomizations'.format(self.n_randomization))
        # Set relative error threshold for stopping
if ('relative error' in config_dict['working mode']):
if (config_dict['working mode']['relative error'] != 'None'):
self.relative_error = float(config_dict['working mode']['relative error'])
if (self.verbose >= 1):
self.logger.info('Stopping when relative error is below {0}'.format(self.relative_error))
else:
self.relative_error = 1e-4
else:
self.relative_error = 1e-4
# Save all cycles
if ('save all cycles' not in config_dict['working mode']):
self.save_all_cycles = False
else:
self.save_all_cycles = hazel.util.tobool(config_dict['working mode']['save all cycles'])
if (self.verbose >= 1):
self.logger.info('Saving all cycles : {0}'.format(self.save_all_cycles))
# Deal with the atmospheres
tmp = config_dict['atmospheres']
self.atmospheres = {}
if (self.verbose >= 1):
self.logger.info('Adding atmospheres')
for key, value in tmp.items():
if ('photosphere' in key):
if (self.verbose >=1):
self.logger.info(' - New available photosphere : {0}'.format(value['name']))
self.add_photosphere(value)
if ('chromosphere' in key):
if (self.verbose >= 1):
self.logger.info(' - New available chromosphere : {0}'.format(value['name']))
self.add_chromosphere(value)
if ('parametric' in key):
if (self.verbose >= 1):
self.logger.info(' - New available parametric : {0}'.format(value['name']))
self.add_parametric(value)
if ('straylight' in key):
if (self.verbose >= 1):
self.logger.info(' - New available straylight : {0}'.format(value['name']))
self.add_straylight(value)
self.setup()
def setup(self):
"""
Setup the model for synthesis/inversion. This setup includes adding the topologies, removing unused
atmospheres, reading the number of cycles for the inversion and some sanity checks
Parameters
----------
None
Returns
-------
None
"""
# Adding topologies
if (self.verbose >= 1):
self.logger.info("Adding topologies")
for value in self.topologies:
self.add_topology(value)
# Remove unused atmospheres defined in the configuration file and not in the topology
if (self.verbose >= 1):
self.logger.info("Removing unused atmospheres")
self.remove_unused_atmosphere()
# Calculate indices for atmospheres
index_chromosphere = 1
index_photosphere = 1
self.n_photospheres = 0
self.n_chromospheres = 0
for k, v in self.atmospheres.items():
if (v.type == 'photosphere'):
v.index = index_photosphere
index_photosphere += 1
self.n_photospheres += 1
if (v.type == 'chromosphere'):
v.index = index_chromosphere
index_chromosphere += 1
self.n_chromospheres += 1
# Use analytical RFs if only photospheres are defined
if (self.n_chromospheres == 0 and self.use_analytical_RF_if_possible):
self.use_analytical_RF = True
if (self.verbose >= 1):
self.logger.info('Using analytical RFs : {0}'.format(self.use_analytical_RF))
else:
self.use_analytical_RF = False
# Check that number of pixels is the same for all atmospheric files if in synthesis mode
if (self.working_mode == 'synthesis'):
n_pixels = [v.n_pixel for k, v in self.atmospheres.items()]
all_equal = all(x == n_pixels[0] for x in n_pixels)
if (not all_equal):
for k, v in self.atmospheres.items():
self.logger.info('{0} -> {1}'.format(k, v.n_pixel))
raise Exception("Files with model atmospheres do not contain the same number of pixels")
else:
if (self.verbose >= 1):
self.logger.info('Number of pixels to read : {0}'.format(n_pixels[0]))
self.n_pixels = n_pixels[0]
if (self.working_mode == 'inversion'):
n_pixels = [v.n_pixel for k, v in self.spectrum.items()]
all_equal = all(x == n_pixels[0] for x in n_pixels)
if (not all_equal):
for k, v in self.spectrum.items():
self.logger.info('{0} -> {1}'.format(k, v.n_pixel))
raise Exception("Files with spectral regions do not contain the same number of pixels")
else:
if (self.verbose >= 1):
self.logger.info('Number of pixels to invert : {0}'.format(n_pixels[0]))
self.n_pixels = n_pixels[0]
# Check that the number of pixels from all observations (in case of inversion) is the same
# Check also if they are equal to those of the models
# n_pixels = [v.n_pixel for k, v in self.atmospheres.items()]
# all_equal = all(x == n_pixels[0] for x in n_pixels)
# Check that the number of cycles is the same for all atmospheres (in case of inversion)
if (self.working_mode == 'inversion'):
cycles = []
for k, v in self.atmospheres.items():
for k2, v2 in v.cycles.items():
if (v2 is not None):
cycles.append(len(v2))
all_equal = all(x == cycles[0] for x in cycles)
if (not all_equal):
raise Exception("Number of cycles in the nodes of active atmospheres is not always the same")
else:
if (self.n_cycles is None):
self.n_cycles = cycles[0]
# if (self.working_mode == 'inversion'):
# cycles = []
# for tmp in ['I', 'Q', 'U', 'V']:
# if ( cycles.append
# for k, v in self.atmospheres.items():
# for k2, v2 in v.cycles.items():
# cycles.append(len(v2))
# all_equal = all(x == cycles[0] for x in cycles)
# if (not all_equal):
# raise Exception("Number of cycles in the nodes of active atmospheres is not always the same")
# else:
# if (self.n_cycles is None):
# self.n_cycles = cycles[0]
filename = os.path.join(os.path.dirname(__file__),'data/LINEAS')
ff = open(filename, 'r')
self.LINES = ff.readlines()
ff.close()
self.init_sir()
for k, v in self.spectrum.items():
v.allocate_info_cycles(n_cycles=self.n_cycles)
for k, v in self.atmospheres.items():
v.allocate_info_cycles(n_cycles=self.n_cycles)
# Count total number of free parameters
if (self.working_mode == 'inversion'):
self.n_free_parameters = 0
for k, v in self.atmospheres.items():
for k2, v2 in v.cycles.items():
if (v2 is not None):
self.n_free_parameters += max(hazel.util.onlyint(v2[0:self.n_cycles+1]))
if (self.verbose >= 1):
self.logger.info('Total number of free parameters in all cycles : {0}'.format(self.n_free_parameters))
def open_output(self):
self.output_handler = Generic_output_file(self.output_file)
self.output_handler.open(self)
def close_output(self):
self.output_handler.close()
def write_output(self, randomization=0):
if (self.working_mode == 'synthesis'):
self.flatten_parameters_to_reference(cycle=0)
self.output_handler.write(self, pixel=0, randomization=randomization)
def add_spectral(self, spectral):
"""
Programmatically add a spectral region
Parameters
----------
spectral : dict
Dictionary containing the following data
'Name', 'Wavelength', 'Topology', 'Weights Stokes', 'Wavelength file', 'Wavelength weight file',
'Observations file', 'Mask file'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
value = hazel.util.lower_dict_keys(spectral)
if (self.verbose >= 1):
self.logger.info('Adding spectral region {0}'.format(value['name']))
if ('wavelength file' not in value):
value['wavelength file'] = None
elif (value['wavelength file'] == 'None'):
value['wavelength file'] = None
if ('wavelength weight file' not in value):
value['wavelength weight file'] = None
elif (value['wavelength weight file'] == 'None'):
value['wavelength weight file'] = None
if ('observations file' not in value):
value['observations file'] = None
elif (value['observations file'] == 'None'):
value['observations file'] = None
if ('stokes weights' not in value):
value['stokes weights'] = None
elif (value['stokes weights'] == 'None'):
value['stokes weights'] = None
if ('mask file' not in value):
value['mask file'] = None
elif (value['mask file'] == 'None'):
value['mask file'] = None
if ('los' not in value):
value['los'] = None
elif (value['los'] == 'None'):
value['los'] = None
for tmp in ['i', 'q', 'u', 'v']:
if ('weights stokes {0}'.format(tmp) not in value):
value['weights stokes {0}'.format(tmp)] = [None]*10
elif (value['weights stokes {0}'.format(tmp)] == 'None'):
value['weights stokes {0}'.format(tmp)] = [None]*10
if ('boundary condition' not in value):
value['boundary condition'] = None
elif (value['boundary condition'] == 'None'):
value['boundary condition'] = None
if ('instrumental profile' not in value):
value['instrumental profile'] = None
elif (value['instrumental profile'] == 'None'):
value['instrumental profile'] = None
# Wavelength file is not present
if (value['wavelength file'] is None):
# If the wavelength is defined
if ('wavelength' in value):
axis = value['wavelength']
wvl = np.linspace(float(axis[0]), float(axis[1]), int(axis[2]))
wvl_lr = None
if (self.verbose >= 1):
self.logger.info(' - Using wavelength axis from {0} to {1} with {2} steps'.format(float(axis[0]), float(axis[1]), int(axis[2])))
else:
raise Exception('Wavelength range is not defined. Please, use "Wavelength" or "Wavelength file"')
else:
# If both observed and synthetic wavelength points are given
if ('wavelength' in value):
axis = value['wavelength']
if (len(axis) != 3):
raise Exception("Wavelength range is not given in the format: lower, upper, steps")
wvl = np.linspace(float(axis[0]), float(axis[1]), int(axis[2]))
if (self.verbose >= 1):
self.logger.info(' - Using wavelength axis from {0} to {1} with {2} steps'.format(float(axis[0]), float(axis[1]), int(axis[2])))
self.logger.info(' - Reading wavelength axis from {0}'.format(value['wavelength file']))
wvl_lr = np.loadtxt(self.root + value['wavelength file'])
else:
if (self.verbose >= 1):
self.logger.info(' - Reading wavelength axis from {0}'.format(value['wavelength file']))
wvl = np.loadtxt(self.root + value['wavelength file'])
wvl_lr = None
if (value['wavelength weight file'] is None):
if (self.verbose >= 1 and self.working_mode == 'inversion'):
self.logger.info(' - Setting all wavelength weights to 1')
weights = np.ones((4,len(wvl)))
else:
if (self.verbose >= 1):
self.logger.info(' - Reading wavelength weights from {0}'.format(value['wavelength weight file']))
weights = np.loadtxt(self.root + value['wavelength weight file'], skiprows=1).T
# Observations file not present
if (value['observations file'] is None):
if (self.working_mode == 'inversion'):
raise Exception("Inversion mode without observations is not allowed.")
obs_file = None
else:
if (self.verbose >= 1):
self.logger.info(' - Using observations from {0}'.format(value['observations file']))
obs_file = value['observations file']
if (value['mask file'] is None):
mask_file = None
if (self.verbose >= 1):
self.logger.info(' - No mask for pixels')
else:
if (self.verbose >= 1):
self.logger.info(' - Using mask from {0}'.format(value['mask file']))
mask_file = value['mask file']
if (value['instrumental profile'] is None):
if (self.verbose >= 1):
self.logger.info(' - No instrumental profile')
else:
if (self.verbose >= 1):
self.logger.info(' - Instrumental profile : {0}'.format(value['instrumental profile']))
# if (value['straylight file'] is None):
# if (self.verbose >= 1):
# self.logger.info(' - Not using straylight')
# stray_file = None
# else:
# if (self.verbose >= 1):
# self.logger.info(' - Using straylight from {0}'.format(value['straylight file']))
# stray_file = value['straylight file']
if (value['los'] is None):
if (self.working_mode == 'synthesis'):
raise Exception("You need to provide the LOS for spectral region {0}".format(value['name']))
los = None
else:
los = np.array(value['los']).astype('float64')
if (self.verbose >= 1):
self.logger.info(' - Using LOS {0}'.format(value['los']))
if (value['boundary condition'] is None):
if (self.verbose >= 1):
self.logger.info(' - Using default boundary conditions [1,0,0,0] in spectral region {0} or read from file. Check carefully!'.format(value['name']))
boundary = np.array([1.0,0.0,0.0,0.0])
self.normalization = 'on-disk'
else:
boundary = np.array(value['boundary condition']).astype('float64')
if (boundary[0] == 0.0):
if (self.verbose >= 1):
self.logger.info(' - Using off-limb normalization (peak intensity)')
if (self.verbose >= 1):
self.logger.info(' - Using boundary condition {0}'.format(value['boundary condition']))
stokes_weights = []
for st in ['i', 'q', 'u', 'v']:
tmp = hazel.util.tofloat(value['weights stokes {0}'.format(st)])
tmp = [i if i is not None else 1.0 for i in tmp]
stokes_weights.append(tmp)
stokes_weights = np.array(stokes_weights)
self.spectrum[value['name']] = Spectrum(wvl=wvl, weights=weights, observed_file=obs_file,
name=value['name'], stokes_weights=stokes_weights, los=los, boundary=boundary, mask_file=mask_file, instrumental_profile=value['instrumental profile'], root=self.root, wvl_lr=wvl_lr)
self.topologies.append(value['topology'])
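        # Illustrative sketch (hypothetical names and values, not from the original
        # code) of adding a spectral region programmatically instead of through a
        # configuration file; 'Wavelength' is [lower, upper, n_steps], and 'LOS' and
        # 'Boundary condition' are needed for synthesis:
        #
        #     mod = Model(working_mode='synthesis')
        #     mod.add_spectral({'Name': 'spec1', 'Wavelength': [10826, 10833, 150],
        #                       'Topology': 'ch1', 'LOS': [0.0, 0.0, 90.0],
        #                       'Boundary condition': [1.0, 0.0, 0.0, 0.0]})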
def add_photosphere(self, atmosphere):
"""
Programmatically add a photosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Height', 'Line', 'Wavelength', 'Reference atmospheric model',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = SIR_atmosphere(working_mode=self.working_mode, name=atm['name'], verbose=self.verbose)
lines = [int(k) for k in list(atm['spectral lines'])]
# If NLTE is available because PyTorch and PyTorch Geom are available
# check whether the line is needed in NLTE or not
if self.nlte_available:
if ('nlte' not in atm):
self.atmospheres[atm['name']].nlte = False
else:
self.atmospheres[atm['name']].nlte = hazel.util.tobool(atm['nlte'])
if (self.verbose >= 1):
self.logger.info(" * Line in NLTE if available")
else:
self.atmospheres[atm['name']].nlte = False
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
if ('reference frame' in atm):
if ('line-of-sight' in atm['reference frame']):
self.atmospheres[atm['name']].reference_frame = 'line-of-sight'
if ('vertical' in atm['reference frame']):
raise Exception('Magnetic fields in photospheres are always in the line-of-sight reference frame.')
else:
self.atmospheres[atm['name']].reference_frame = 'line-of-sight'
if (self.verbose >= 1):
self.logger.info(" * Adding line : {0}".format(lines))
self.logger.info(" * Magnetic field reference frame : {0}".format(self.atmospheres[atm['name']].reference_frame))
self.atmospheres[atm['name']].add_active_line(lines=lines, spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range), verbose=self.verbose)
if (self.atmospheres[atm['name']].graphnet_nlte is not None):
self.set_nlte(True)
if ('ranges' in atm):
for k, v in atm['ranges'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].ranges[k2] = None
else:
self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
self.atmospheres[atm['name']].regularization[k2] = None
if ('regularization' in atm):
for k, v in atm['regularization'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].regularization[k2] = None
else:
self.atmospheres[atm['name']].regularization[k2] = v
if ('reference atmospheric model' in atm):
my_file = Path(self.root + atm['reference atmospheric model'])
if (not my_file.exists()):
raise FileExistsError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))
self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)
if (self.atmospheres[atm['name']].model_type == '3d'):
self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()
if ('nodes' in atm):
for k, v in atm['nodes'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)
if ('temperature change to recompute departure coefficients' in atm):
self.atmospheres[atm['name']].t_change_departure = float(atm['temperature change to recompute departure coefficients'])
else:
self.atmospheres[atm['name']].t_change_departure = 0.0
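        # Illustrative sketch (hypothetical values): a minimal photosphere added
        # programmatically. 'Spectral lines' holds SIR line indices and the
        # reference model path must point to an existing file:
        #
        #     mod.add_photosphere({'Name': 'ph1', 'Spectral region': 'spec1',
        #                          'Spectral lines': [300],
        #                          'Wavelength': [10826, 10833],
        #                          'Reference atmospheric model': 'photospheres/model.1d'})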
def add_chromosphere(self, atmosphere):
"""
Programmatically add a chromosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Height', 'Line', 'Wavelength', 'Reference atmospheric model',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = Hazel_atmosphere(working_mode=self.working_mode, name=atm['name'])
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
self.atmospheres[atm['name']].add_active_line(line=atm['line'], spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range))
if ('reference frame' in atm):
if (atm['reference frame'] == 'line-of-sight'):
self.atmospheres[atm['name']].reference_frame = 'line-of-sight'
if (atm['reference frame'] == 'vertical'):
self.atmospheres[atm['name']].reference_frame = 'vertical'
else:
self.atmospheres[atm['name']].reference_frame = 'vertical'
if (self.verbose >= 1):
self.logger.info(" * Adding line : {0}".format(atm['line']))
self.logger.info(" * Magnetic field reference frame : {0}".format(self.atmospheres[atm['name']].reference_frame))
if ('ranges' in atm):
for k, v in atm['ranges'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].ranges[k2] = None
else:
self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
self.atmospheres[atm['name']].regularization[k2] = None
if ('regularization' in atm):
for k, v in atm['regularization'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].regularization[k2] = None
else:
self.atmospheres[atm['name']].regularization[k2] = v
if ('coordinates for magnetic field vector' in atm):
if (atm['coordinates for magnetic field vector'] == 'cartesian'):
self.atmospheres[atm['name']].coordinates_B = 'cartesian'
if (atm['coordinates for magnetic field vector'] == 'spherical'):
self.atmospheres[atm['name']].coordinates_B = 'spherical'
else:
self.atmospheres[atm['name']].coordinates_B = 'cartesian'
self.atmospheres[atm['name']].select_coordinate_system()
if (self.verbose >= 1):
self.logger.info(" * Magnetic field coordinates system : {0}".format(self.atmospheres[atm['name']].coordinates_B))
if ('reference atmospheric model' in atm):
my_file = Path(self.root + atm['reference atmospheric model'])
if (not my_file.exists()):
raise FileExistsError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))
self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)
if (self.atmospheres[atm['name']].model_type == '3d'):
self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()
# Set values of parameters
self.atmospheres[atm['name']].height = float(atm['height'])
if ('nodes' in atm):
for k, v in atm['nodes'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)
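        # Illustrative sketch (hypothetical values): a chromospheric slab for the
        # same spectral region; 'Line' names the Hazel transition and 'Height' the
        # slab height:
        #
        #     mod.add_chromosphere({'Name': 'ch1', 'Spectral region': 'spec1',
        #                           'Height': 3.0, 'Line': '10830',
        #                           'Wavelength': [10826, 10833]})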
def add_parametric(self, atmosphere):
"""
Programmatically add a parametric atmosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Wavelength', 'Reference atmospheric model', 'Type',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = Parametric_atmosphere(working_mode=self.working_mode)
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
self.atmospheres[atm['name']].add_active_line(spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range))
if ('ranges' in atm):
for k, v in atm['ranges'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].ranges[k2] = None
else:
self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)
if ('reference atmospheric model' in atm):
my_file = Path(self.root + atm['reference atmospheric model'])
if (not my_file.exists()):
raise FileExistsError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))
self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)
if (self.atmospheres[atm['name']].model_type == '3d'):
self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()
# Set values of parameters
if ('nodes' in atm):
for k, v in atm['nodes'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
self.atmospheres[atm['name']].regularization[k2] = None
if ('regularization' in atm):
for k, v in atm['regularization'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].regularization[k2] = None
else:
self.atmospheres[atm['name']].regularization[k2] = v
def add_straylight(self, atmosphere):
"""
Programmatically add a straylight atmosphere
Parameters
----------
atmosphere : dict
Dictionary containing the following data
'Name', 'Spectral region', 'Reference atmospheric model',
'Ranges', 'Nodes'
Returns
-------
None
"""
# Make sure that all keys of the input dictionary are in lower case
# This is irrelevant if a configuration file is used because this has been
# already done
atm = hazel.util.lower_dict_keys(atmosphere)
self.atmospheres[atm['name']] = Straylight_atmosphere(working_mode=self.working_mode)
if ('wavelength' not in atm):
atm['wavelength'] = None
elif (atm['wavelength'] == 'None'):
atm['wavelength'] = None
if (atm['wavelength'] is not None):
wvl_range = [float(k) for k in atm['wavelength']]
else:
wvl_range = [np.min(self.spectrum[atm['spectral region']].wavelength_axis), np.max(self.spectrum[atm['spectral region']].wavelength_axis)]
self.atmospheres[atm['name']].add_active_line(spectrum=self.spectrum[atm['spectral region']],
wvl_range=np.array(wvl_range))
if ('ranges' in atm):
for k, v in atm['ranges'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].ranges[k2] = None
else:
self.atmospheres[atm['name']].ranges[k2] = hazel.util.tofloat(v)
        if ('reference atmospheric model' in atm):
            my_file = Path(self.root + atm['reference atmospheric model'])
            if (not my_file.exists()):
                raise FileExistsError("Input file {0} for atmosphere {1} does not exist.".format(my_file, atm['name']))
            self.atmospheres[atm['name']].load_reference_model(self.root + atm['reference atmospheric model'], self.verbose)
if (self.atmospheres[atm['name']].model_type == '3d'):
self.atmospheres[atm['name']].n_pixel = self.atmospheres[atm['name']].model_handler.get_npixel()
# Set values of parameters
if ('nodes' in atm):
for k, v in atm['nodes'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
self.atmospheres[atm['name']].cycles[k2] = hazel.util.toint(v)
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
self.atmospheres[atm['name']].regularization[k2] = None
if ('regularization' in atm):
for k, v in atm['regularization'].items():
for k2, v2 in self.atmospheres[atm['name']].parameters.items():
if (k.lower() == k2.lower()):
if (v == 'None'):
self.atmospheres[atm['name']].regularization[k2] = None
else:
self.atmospheres[atm['name']].regularization[k2] = v
def remove_unused_atmosphere(self):
"""
Remove unused atmospheres
Parameters
----------
None
Returns
-------
None
"""
to_remove = []
for k, v in self.atmospheres.items():
if (not v.active):
to_remove.append(k)
if (self.verbose >= 1):
self.logger.info(' - Atmosphere {0} deleted.'.format(k))
for k in to_remove:
self.atmospheres.pop(k)
def init_sir_external(self):
"""
Initialize SIR for this synthesis
Parameters
----------
None
Returns
-------
None
"""
for k, v in self.atmospheres.items():
if (v.type == 'photosphere'):
f = open('lte.grid', 'w')
f.write("IMPORTANT: a) All items must be separated by commas. \n")
f.write(" b) The first six characters of the last line \n")
f.write(" in the header (if any) must contain the symbol --- \n")
f.write("\n")
f.write("Line and blends indices : Initial lambda Step Final lambda \n")
f.write("(in this order) (mA) (mA) (mA) \n")
f.write("-----------------------------------------------------------------------\n")
ind_low = (np.abs(v.spectrum.wavelength_axis - v.wvl_range_lambda[0])).argmin()
ind_top = (np.abs(v.spectrum.wavelength_axis - v.wvl_range_lambda[1])).argmin()
low = v.spectrum.wavelength_axis[ind_low]
top = v.spectrum.wavelength_axis[ind_top] # TODO
delta = (v.spectrum.wavelength_axis[1] - v.spectrum.wavelength_axis[0])
filename = os.path.join(os.path.dirname(__file__),'data/LINEAS')
ff = open(filename, 'r')
flines = ff.readlines()
ff.close()
for i in range(len(v.lines)):
for l in flines:
tmp = l.split()
index = int(tmp[0].split('=')[0])
if (index == v.lines[0]):
wvl = float(tmp[2])
f.write("{0} : {1}, {2}, {3}\n".format(str(v.lines)[1:-1], 1e3*(low-wvl), 1e3*delta, 1e3*(top-wvl)))
f.close()
v.n_lambda = sir_code.init_externalfile(v.index, filename)
def init_sir(self):
"""
Initialize SIR for this synthesis. This version does not make use of any external file, which might be
not safe when running in MPI mode.
Parameters
----------
None
Returns
-------
None
"""
lines = []
n_lines = 0
elements = {'H':1,'HE':2,'LI':3,'BE':4,'B':5,'C':6,'N':7,'O':8,'F':9,'NE':10,
'NA':11,'MG':12,'AL':13,'SI':14,'P':15,'S':16,'CL':17,'AR':18,'K':19,'CA':20,'SC':21,'TI':22,'V':23,'CR':24,
'MN':25,'FE':26,'CO':27,'NI':28,'CU':29,'ZN':30,'GA':31,'GE':32,'AS':33,'SE':34,'BR':35,'KR':36,
'RB':37,'SR':38,'Y':39,'ZR':40,'NB':41,'MO':42,'TC':43,'RU':44,'RH':45,'PD':46,'AG':47,'CD':48,'IN':49,
'SN':50,'SB':51,'TE':52,'I':53,'XE':54,'CS':55,'BA':56,'LA':57,'CE':58,'PR':59,'ND':60,'PM':61,
'SM':62,'EU':63,'GD':64,'TB':65,'DY':66,'HO':67,'ER':68,'TM':69,'YB':70,'LU':71,'HF':72,'TA':73,'W':74,
'RE':75,'OS':76,'IR':77,'PT':78,'AU':79,'HG':80,'TL':81,'PB':82,'BI':83,'PO':84,'AT':85,'RN':86,
'FR':87,'RA':88,'AC':89,'TH':90,'PA':91,'U':92}
states = {'S': 0, 'P': 1, 'D': 2, 'F': 3, 'G': 4, 'H': 5, 'I': 6}
for k, v in self.atmospheres.items():
if (v.type == 'photosphere'):
n_lines += 1
ind_low = (np.abs(v.spectrum.wavelength_axis - v.wvl_range_lambda[0])).argmin()
                ind_top = (np.abs(v.spectrum.wavelength_axis - v.wvl_range_lambda[1])).argmin()
# -*- coding: utf-8 -*-
"""
==========================================
GammaEyes Version 1.0.1 ==
Created at: 2021.07.19 ==
Author@<NAME> ==
Email <EMAIL> ==
==========================================
GammaEyes is an open source software for
gamma spectrum processing and analysis.
=============== Main ====================
"""
# ======================================================================
# Import the required modules
import sys, os
# PyQt5 modules
from PyQt5.QtWidgets import *
from PyQt5 import sip, QtWidgets, QtCore, QtGui
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import (QMenu, QApplication, QTableWidgetItem)
# geGui modules
from geGui import Ui_MainWindow
# ge modules
from lib.geDenoise import geDenoise
from lib.geFindPeaks import geFindpeaks
from lib.geFwhmCali import geFwhmCali
from lib.geStable import geStabilization
from lib.geBroaden import geBroaden
from lib.geMCReader import geMCReader
from lib.geFit import geFit
from lib.geDatabase import geDatabase
from lib.geRWIO import geRWIO
from lib.geFSA import geFSA
# Else
import numpy as np
import matplotlib
matplotlib.use("Qt5Agg")
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FC
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
class MainWindow(QMainWindow,Ui_MainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.setupUi(self)
# ==========================================================================================
# Setting interface conversion
self.stackedWidget.setCurrentIndex(0)
self.pushButton.clicked.connect(lambda: self.stackedWidget.setCurrentIndex(0))
self.pushButton_2.clicked.connect(lambda: self.stackedWidget.setCurrentIndex(1))
self.pushButton_3.clicked.connect(lambda: self.stackedWidget.setCurrentIndex(2))
self.pushButton_4.clicked.connect(lambda: self.stackedWidget.setCurrentIndex(3))
self.pushButton_5.clicked.connect(lambda: self.stackedWidget.setCurrentIndex(4))
self.pushButton_6.clicked.connect(lambda: self.stackedWidget.setCurrentIndex(5))
self.pushButton_7.clicked.connect(lambda: self.stackedWidget.setCurrentIndex(6))
self.cbx_fsa_wlls_type.currentIndexChanged.connect(lambda: self.slot_cbx_fsa_wlls_change())
# ==========================================================================================
# Table Setting
# ------------------
# Table 1
# set customMenu
self.tableWidget.setContextMenuPolicy(Qt.CustomContextMenu)
self.tableWidget.customContextMenuRequested.connect(self.showCustomMenu)
# self.setLayout(conLayout)
self.contextMenu = QMenu(self)
self.ADD = self.contextMenu.addAction('Add')
self.DELE = self.contextMenu.addAction('Delete')
self.ADD.triggered.connect(lambda: self.table_add(self.tableWidget))
self.DELE.triggered.connect(lambda: self.table_delete(self.tableWidget))
# --------
# Table 2
self.table_eva_fwhm_cr.setContextMenuPolicy(Qt.CustomContextMenu) ######
self.table_eva_fwhm_cr.customContextMenuRequested.connect(self.showCustomMenu1) ####
self.contextMenu1 = QMenu(self)
self.ADD1 = self.contextMenu1.addAction('Add')
self.DELE1 = self.contextMenu1.addAction('Delete')
self.ADD1.triggered.connect(lambda: self.table_add(self.table_eva_fwhm_cr))
self.DELE1.triggered.connect(lambda: self.table_delete(self.table_eva_fwhm_cr))
# --------
# Table 3
self.table_eva_fwhm_ln.setContextMenuPolicy(Qt.CustomContextMenu) ######
self.table_eva_fwhm_ln.customContextMenuRequested.connect(self.showCustomMenu2) ####
self.contextMenu2 = QMenu(self)
self.ADD2 = self.contextMenu2.addAction('Add')
self.DELE2 = self.contextMenu2.addAction('Delete')
self.ADD2.triggered.connect(lambda: self.table_add(self.table_eva_fwhm_ln))
self.DELE2.triggered.connect(lambda: self.table_delete(self.table_eva_fwhm_ln))
# --------
# Table 4
self.table_fit_sci.setContextMenuPolicy(Qt.CustomContextMenu) ######
self.table_fit_sci.customContextMenuRequested.connect(self.showCustomMenu3) ####
self.contextMenu3 = QMenu(self)
self.ADD3 = self.contextMenu3.addAction('Add')
self.DELE3 = self.contextMenu3.addAction('Delete')
self.ADD3.triggered.connect(lambda: self.table_add(self.table_fit_sci))
self.DELE3.triggered.connect(lambda: self.table_delete(self.table_fit_sci))
self.table_fit_semi_p.setContextMenuPolicy(Qt.CustomContextMenu) ######
self.table_fit_semi_p.customContextMenuRequested.connect(self.showCustomMenu4) ####
self.contextMenu4 = QMenu(self)
self.ADD4 = self.contextMenu4.addAction('Add')
self.DELE4 = self.contextMenu4.addAction('Delete')
self.ADD4.triggered.connect(lambda: self.table_add(self.table_fit_semi_p))
self.DELE4.triggered.connect(lambda: self.table_delete(self.table_fit_semi_p))
# --------
# Table 5
self.table_data_pgnaa.setContextMenuPolicy(Qt.CustomContextMenu) ######
self.table_data_pgnaa.customContextMenuRequested.connect(self.showCustomMenuPgaa) ####
self.contextMenuPgaa = QMenu(self)
self.Show = self.contextMenuPgaa.addAction('Show selected lines')
self.Show.triggered.connect(lambda: self.table_show(self.table_data_pgnaa))
# --------
# Table 6
self.table_data_decay.setContextMenuPolicy(Qt.CustomContextMenu) ######
self.table_data_decay.customContextMenuRequested.connect(self.showCustomMenuDecay) ####
self.contextMenuDecay = QMenu(self)
self.Show1 = self.contextMenuDecay.addAction('Show selected lines')
self.Show1.triggered.connect(lambda: self.table_show(self.table_data_decay))
# =======================================================================================
# slot connect
# ----- toolbar -----
self.pushButton_11.clicked.connect(lambda: self.slot_btn_fig_zoom())
self.pushButton_12.clicked.connect(lambda: self.slot_btn_fig_zoom_back())
self.pushButton_13.clicked.connect(lambda: self.slot_btn_fig_zoom_home())
self.pushButton_14.clicked.connect(lambda: self.slot_btn_fig_parameters())
self.pushButton_18.clicked.connect(lambda: self.slot_btn_fig_save())
self.pushButton_17.clicked.connect(lambda: self.slot_btn_data_back())
self.pushButton_16.clicked.connect(lambda: self.slot_btn_data_home())
self.pushButton_15.clicked.connect(lambda: self.slot_btn_energy_show())
# ----- start -----
self.pushButton_8.clicked.connect(lambda: self.slot_btn_open_file())
self.pushButton_9.clicked.connect(lambda: self.slot_btn_save_file())
self.pushButton_10.clicked.connect(lambda: self.slot_btn_open_energy_file())
# ----- evaluation -----
self.pbt_eva_cali_linear_do.clicked.connect(lambda: self.slot_btn_eva_cali_linear_do())
self.pbt_eva_cali_linear_save.clicked.connect(lambda: self.slot_btn_eva_cali_linear_save())
self.pbt_eva_cali_nonlinear_do.clicked.connect(lambda: self.slot_btn_eva_cali_nonlinear_do())
self.pbt_eva_cali_nonlinear_save.clicked.connect(lambda: self.slot_btn_eva_cali_nonlinear_save())
self.pbt_eva_fwhm_mc_do.clicked.connect(lambda: self.slot_btn_eva_fwhm_mcnp())
self.pbt_eva_fwhm_cr_do.clicked.connect(lambda: self.slot_btn_eva_fwhm_cear())
self.pbt_eva_fwhm_ln_do.clicked.connect(lambda: self.slot_btn_eva_fwhm_linear())
# ----- process -----
self.pbt_proc_findp_auto_do.clicked.connect(lambda: self.slot_btn_proc_findp_auto())
self.pbt_proc_findp_auto_do_2.clicked.connect(lambda: self.slot_btn_proc_findp_auto_cwt())
self.pbt_proc_denoi_fit_do.clicked.connect(lambda: self.slot_btn_proc_fit())
self.pbt_proc_denoi_wt_do.clicked.connect(lambda: self.slot_btn_proc_wavelet())
self.pbt_proc_denoi_lp_do.clicked.connect(lambda: self.slot_btn_proc_lowpass())
self.pbt_proc_denoi_emd1.clicked.connect(lambda: self.slot_btn_proc_emd_1())
self.pbt_proc_denoi_emd_do.clicked.connect(lambda: self.slot_btn_proc_emd_do())
self.pbt_proc_sta_3rp_do.clicked.connect(lambda: self.slot_btn_proc_sta_3rp_do())
self.pbt_proc_sta_2rp_do.clicked.connect(lambda: self.slot_btn_proc_sta_2rp_do())
# ----- FSA -----
self.pbt_fsa_lls_lib.clicked.connect(lambda: self.slot_btn_fsa_lls_lib())
self.pbt_fsa_lls_compo.clicked.connect(lambda: self.slot_btn_fsa_lls_compos())
self.pbt_fsa_lls_spe.clicked.connect(lambda: self.slot_btn_fsa_lls_spec())
self.pbt_fsa_lls_solve.clicked.connect(lambda: self.slot_btn_fsa_lls_solve())
self.pbt_fsa_wlls_lib.clicked.connect(lambda: self.slot_btn_fsa_wlls_lib())
self.pbt_fsa_wlls_cont.clicked.connect(lambda: self.slot_btn_fsa_wlls_compos())
self.pbt_fsa_wlls_spe.clicked.connect(lambda: self.slot_btn_fsa_wlls_spec())
self.pbt_fsa_wlls_w.clicked.connect(lambda: self.slot_btn_fsa_wlls_weight())
self.pbt_fsa_wlls_solve.clicked.connect(lambda: self.slot_btn_fsa_wlls_solve())
self.pbt_fsa_pca_lib.clicked.connect(lambda: self.slot_btn_fsa_pca_lib())
self.pbt_fsa_pca_cont.clicked.connect(lambda: self.slot_btn_fsa_pca_compos())
self.pbt_fsa_pca_spe.clicked.connect(lambda: self.slot_btn_fsa_pca_spec())
self.pbt_fsa_pca_do.clicked.connect(lambda: self.slot_btn_fsa_pca_solve())
# ----- fitting -----
self.pbt_fit_sci_do.clicked.connect(lambda: self.slot_btn_fit_sci_do())
self.pbt_fit_sci_do_2.clicked.connect(lambda: self.slot_btn_fit_semi_do())
# ----- simulation -----
self.radioButton.clicked.connect(lambda: self.slot_rbn_simu_edep())
self.radioButton_2.clicked.connect(lambda: self.slot_rbn_simu_spec())
self.rbt_simu_broaden_conv.clicked.connect(lambda: self.slot_rbn_simu_spec_conv())
self.pbt_simu_bro_mc_do.clicked.connect(lambda: self.slot_btn_simu_bro_mc_do())
self.pbt_simu_bro_mc_draw.clicked.connect(lambda: self.slot_btn_simu_bro_mc_draw())
self.pbt_simu_bro_cr_do.clicked.connect(lambda: self.slot_btn_simu_bro_cr_do())
self.pbt_simu_bro_cr_draw.clicked.connect(lambda: self.slot_btn_simu_bro_cr_draw())
self.pbt_simu_bro_ln_do.clicked.connect(lambda: self.slot_btn_simu_bro_ln_do())
self.pbt_simu_bro_ln_draw.clicked.connect(lambda: self.slot_btn_simu_bro_ln_draw())
self.pbt_simu_mcr_open.clicked.connect(lambda: self.slot_btn_simu_mcr_open())
self.pbt_simu_mcr_save.clicked.connect(lambda: self.slot_btn_simu_mcr_save())
# ----- database -----
self.pbt_data_pgaa_do.clicked.connect(lambda: self.slot_btn_data_pgnaa_do())
self.pbt_data_decay_do.clicked.connect(lambda: self.slot_btn_data_decay_do())
self.pbt_data_iso_do.clicked.connect(lambda: self.slot_btn_data_iso_do())
# =======================================================================================
# setting shadow
        self.effect_shadow = QtWidgets.QGraphicsDropShadowEffect(self)
        self.effect_shadow.setOffset(0, 0)  # offset
        self.effect_shadow.setBlurRadius(10)  # blur radius
        self.effect_shadow.setColor(QtCore.Qt.gray)  # shadow color
        self.pushButton_8.setGraphicsEffect(self.effect_shadow)
        self.effect_shadow2 = QtWidgets.QGraphicsDropShadowEffect(self)
        self.effect_shadow2.setOffset(0, 0)  # offset
        self.effect_shadow2.setBlurRadius(10)  # blur radius
        self.effect_shadow2.setColor(QtCore.Qt.gray)  # shadow color
        self.pushButton_9.setGraphicsEffect(self.effect_shadow2)
        self.effect_shadow3 = QtWidgets.QGraphicsDropShadowEffect(self)
        self.effect_shadow3.setOffset(0, 0)  # offset
        self.effect_shadow3.setBlurRadius(10)  # blur radius
        self.effect_shadow3.setColor(QtCore.Qt.gray)  # shadow color
        self.pushButton_10.setGraphicsEffect(self.effect_shadow3)
        self.effect_shadow4 = QtWidgets.QGraphicsDropShadowEffect(self)
        self.effect_shadow4.setOffset(0, 0)  # offset
        self.effect_shadow4.setBlurRadius(10)  # blur radius
        self.effect_shadow4.setColor(QtCore.Qt.gray)  # shadow color
        self.label_4.setGraphicsEffect(self.effect_shadow4)
        self.effect_shadow5 = QtWidgets.QGraphicsDropShadowEffect(self)
        self.effect_shadow5.setOffset(0, 0)  # offset
        self.effect_shadow5.setBlurRadius(10)  # blur radius
        self.effect_shadow5.setColor(QtCore.Qt.gray)  # shadow color
        self.label_17.setGraphicsEffect(self.effect_shadow5)
        self.effect_shadow6 = QtWidgets.QGraphicsDropShadowEffect(self)
        self.effect_shadow6.setOffset(0, 0)  # offset
        self.effect_shadow6.setBlurRadius(10)  # blur radius
        self.effect_shadow6.setColor(QtCore.Qt.gray)  # shadow color
        self.textEdit_8.setGraphicsEffect(self.effect_shadow6)
        self.effect_shadow7 = QtWidgets.QGraphicsDropShadowEffect(self)
        self.effect_shadow7.setOffset(0, 0)  # offset
        self.effect_shadow7.setBlurRadius(10)  # blur radius
        self.effect_shadow7.setColor(QtCore.Qt.gray)  # shadow color
        self.tabWidget_2.setGraphicsEffect(self.effect_shadow7)
        self.effect_shadow8 = QtWidgets.QGraphicsDropShadowEffect(self)
        self.effect_shadow8.setOffset(0, 0)  # offset
        self.effect_shadow8.setBlurRadius(10)  # blur radius
        self.effect_shadow8.setColor(QtCore.Qt.gray)  # shadow color
        self.tabWidget_3.setGraphicsEffect(self.effect_shadow8)
        self.effect_shadow9 = QtWidgets.QGraphicsDropShadowEffect(self)
        self.effect_shadow9.setOffset(0, 0)  # offset
        self.effect_shadow9.setBlurRadius(10)  # blur radius
        self.effect_shadow9.setColor(QtCore.Qt.gray)  # shadow color
        self.tabWidget_4.setGraphicsEffect(self.effect_shadow9)
        self.effect_shadow10 = QtWidgets.QGraphicsDropShadowEffect(self)
        self.effect_shadow10.setOffset(0, 0)  # offset
        self.effect_shadow10.setBlurRadius(10)  # blur radius
        self.effect_shadow10.setColor(QtCore.Qt.gray)  # shadow color
        self.tabWidget_5.setGraphicsEffect(self.effect_shadow10)
        self.effect_shadow11 = QtWidgets.QGraphicsDropShadowEffect(self)
        self.effect_shadow11.setOffset(0, 0)  # offset
        self.effect_shadow11.setBlurRadius(10)  # blur radius
        self.effect_shadow11.setColor(QtCore.Qt.gray)  # shadow color
        self.tabWidget_6.setGraphicsEffect(self.effect_shadow11)
        self.effect_shadow12 = QtWidgets.QGraphicsDropShadowEffect(self)
        self.effect_shadow12.setOffset(0, 0)  # offset
        self.effect_shadow12.setBlurRadius(10)  # blur radius
        self.effect_shadow12.setColor(QtCore.Qt.gray)  # shadow color
        self.tabWidget.setGraphicsEffect(self.effect_shadow12)
# =======================================================================================
# canvas config
self.fig = plt.figure(figsize=(5, 10))
self.canvas = FC(self.fig)
# self.ax = self.fig.add_subplot(111)
self.ax = self.fig.subplots()
self.fig.subplots_adjust(top=0.93,bottom=0.08,left=0.05,right=0.98,hspace=0,wspace=0)
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
        self.gridlayout = QGridLayout(self.label_4)  # use label_4 as the parent container
self.gridlayout.addWidget(self.canvas, 0, 1)
self.toolbar = NavigationToolbar(self.canvas, self)
self.toolbar.hide()
self.canvas.mpl_connect("motion_notify_event",self.slot_show_data)
# =======================================================================================
# global parameters
self.spec_now = np.zeros(0)
self.spec_last = np.zeros(0)
self.spec_ori = np.zeros(0)
self.energy = np.zeros(0)
self.energy_curve_nonlinear = np.zeros(0)
self.energy_curve_linear = np.zeros(0)
self.fwhm_curve = np.zeros(0)
self.flag_energy = False
self.flag_open_file = False
self.flas_simu_broaden = 0 # broaden algorithm type 0: sampling 1: conv
self.simu_bro_edep_fp = ""
# ===================================================================================
# 1 SLOT table operate
    def showCustomMenu(self):
        self.contextMenu.exec_(QCursor.pos())  # show the menu at the cursor position
    def showCustomMenu1(self):
        self.contextMenu1.exec_(QCursor.pos())  # show the menu at the cursor position
    def showCustomMenu2(self):
        self.contextMenu2.exec_(QCursor.pos())  # show the menu at the cursor position
    def showCustomMenu3(self):
        self.contextMenu3.exec_(QCursor.pos())  # show the menu at the cursor position
    def showCustomMenu4(self):
        self.contextMenu4.exec_(QCursor.pos())  # show the menu at the cursor position
    def showCustomMenuPgaa(self):
        self.contextMenuPgaa.exec_(QCursor.pos())  # show the menu at the cursor position
    def showCustomMenuDecay(self):
        self.contextMenuDecay.exec_(QCursor.pos())  # show the menu at the cursor position
def slot_cbx_fsa_wlls_change(self):
try:
if self.cbx_fsa_wlls_type.currentText()=="std":
self.pbt_fsa_wlls_w.setEnabled(False)
self.let_fsa_wlls_weight.setEnabled(False)
else:
self.pbt_fsa_wlls_w.setEnabled(True)
self.let_fsa_wlls_weight.setEnabled(True)
if len(self.let_fsa_wlls_lib.text())!=0 and len(self.let_fsa_wlls_cont.text())!=0 and len(self.let_fsa_wlls_spe.text())!=0:
if self.cbx_fsa_wlls_type.currentText() == "std":
self.pbt_fsa_wlls_solve.setEnabled(True)
elif len(self.let_fsa_wlls_weight.text()) != 0 and self.cbx_fsa_wlls_type.currentText() == "else":
self.pbt_fsa_wlls_solve.setEnabled(True)
else:
self.pbt_fsa_wlls_solve.setEnabled(False)
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def table_add(self, table):
try:
cur_total_row = table.rowCount()
table.setRowCount(cur_total_row+1)
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def table_delete(self, table):
try:
selected_indexs = table.selectedIndexes()
if len(selected_indexs) == 0:
return
indexs = []
for i in range(len(selected_indexs)):
index_now = selected_indexs[len(selected_indexs)-i-1].row()
if index_now not in indexs:
indexs.append(index_now)
table.removeRow(index_now)
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def table_show(self, table):
try:
selected_indexs = table.selectedIndexes()
if len(selected_indexs) == 0:
return
indexs = []
Egamma = []
sigma = []
for i in range(len(selected_indexs)):
index_now = selected_indexs[len(selected_indexs) - i - 1].row()
if index_now not in indexs:
indexs.append(index_now)
Egamma.append(self.dataBase[index_now,5])
sigma.append(self.dataBase[index_now,7])
self.canvas_update_database(Egamma, sigma)
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
# ===================================================================================
# 2 SLOT canvas
# ----- canvas -----
def canvas_update(self):
self.ax.cla()
# self.fig.clf()
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
# print(len(self.spec_now))
# print(len(self.spec_last))
if self.pushButton_15.isChecked():
if len(self.spec_last) == 0: # when there is no spec_last
self.ax.plot(self.energy,self.spec_now, color="red", label="Now")
self.ax.legend()
self.canvas.draw()
else:
self.ax.plot(self.energy,self.spec_last, color="blue", label="Last")
self.ax.plot(self.energy,self.spec_now, color="red", label="Now")
self.ax.legend()
self.canvas.draw()
else:
if len(self.spec_last) == 0: # when there is no spec_last
self.ax.plot(self.spec_now, color="red", label="Now")
self.ax.legend()
self.canvas.draw()
else:
self.ax.plot(self.spec_last, color="blue", label="Last")
self.ax.plot(self.spec_now, color="red", label="Now")
self.ax.legend()
self.canvas.draw()
def canvas_update_fsa_lib(self,lib):
m,n = np.shape(lib)
self.ax.cla()
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
if self.pushButton_15.isChecked():
for i in range(n):
self.ax.plot(self.energy, lib[:,i])
self.ax.legend()
self.canvas.draw()
else:
for i in range(n):
self.ax.plot(lib[:, i])
self.ax.legend()
self.canvas.draw()
def canvas_update_fsa_spec(self,spec):
self.ax.cla()
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
if self.pushButton_15.isChecked():
self.ax.plot(self.energy, spec)
self.ax.legend()
self.canvas.draw()
else:
self.ax.plot(spec)
self.ax.legend()
self.canvas.draw()
def canvas_update_findp(self,peaks):
self.ax.cla()
# self.fig.clf()
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
# print(len(self.spec_now))
# print(len(self.spec_last))
if self.pushButton_15.isChecked():
self.ax.plot(self.energy, self.spec_now, color="red", label="Now")
for i in range(len(peaks)):
self.ax.axvline(x=self.energy[peaks[i]],color="green")
self.ax.legend()
self.canvas.draw()
else:
self.ax.plot(self.spec_now, color="red", label="Now")
for i in range(len(peaks)):
self.ax.axvline(x=peaks[i],color="green")
self.ax.legend()
self.canvas.draw()
def canvas_update_energy_curve(self,energy_curve):
self.ax.cla()
# self.fig.clf()
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
self.ax.plot(energy_curve,color="red",label="Energy Curve")
self.ax.legend()
self.canvas.draw()
def canvas_update_fit_result(self,points_x,points_y,curve_x,curve_y):
self.ax.cla()
# self.fig.clf()
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
self.ax.scatter(points_x,points_y,color="red",label="Points")
self.ax.plot(curve_x,curve_y,color="green",label="Curve")
self.ax.legend()
self.canvas.draw()
def canvas_update_database(self,egamma,sigma):
if len(self.spec_now)==0:
self.ax.cla()
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
for i in range(len(egamma)):
self.ax.plot([egamma[i],egamma[i]],[0,sigma[i]],color="red")
self.canvas.draw()
else:
if len(self.energy)==0:
QMessageBox.warning(self,
"Error!",
"Please choose the energy list")
max_in_plot = np.max(self.spec_now)
sigma = sigma/np.max(sigma)*max_in_plot
self.ax.cla()
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
self.ax.plot(self.energy,self.spec_now,color="blue")
for i in range(len(egamma)):
self.ax.plot([egamma[i],egamma[i]],[0,sigma[i]],color="red")
self.canvas.draw()
# ===================================================================================
# 3 SLOT information out
# ----- Do -----
def cmd_out(self,strings):
self.label_17.setText(strings)
# ----- Do -----
def info_out(self, strings):
self.textEdit_8.setText(strings)
# =======================================================
# 4 SLOT toolbar setting
def slot_btn_fig_zoom(self):
try:
self.toolbar.zoom()
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_fig_zoom_back(self):
try:
self.toolbar.back()
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_fig_zoom_home(self):
try:
self.toolbar.home()
except BaseException as e:
print(e.__str__())
# self.ax.plot([1, 2, 3, 4], [5, 5, 5, 5])
# self.canvas.draw()
def slot_btn_fig_parameters(self):
try:
self.toolbar.edit_parameters()
except BaseException as e:
print(e.__str__())
def slot_btn_fig_save(self):
try:
self.toolbar.save_figure()
except BaseException as e:
print(e.__str__())
def slot_btn_data_back(self):
try:
if len(self.spec_last) != 0:
self.spec_now = self.spec_last + 0
self.spec_last = np.zeros(0)
self.canvas_update()
self.cmd_out("DO: 已清除上一步操作")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_data_home(self):
try:
self.spec_now = self.spec_ori + 0
self.spec_last = np.zeros(0)
self.canvas_update()
self.cmd_out("DO: 已返回原始能谱")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_energy_show(self):
try:
if len(self.energy) == 0:
QMessageBox.warning(self,
"Error!",
"Please choose the Energy file")
self.pushButton_15.setChecked(False)
else:
self.canvas_update()
self.cmd_out("DO: Show the Energy")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_show_data(self, event):
try:
info = " " + str(int(event.xdata * 10000) / 10000) + " " + str(int(event.ydata * 10000) / 10000)
self.lbl_canvas_data.setText(info)
except BaseException as e:
pass
# ===================================================================================
# 5 SLOT Start
# ----- open the spectrum file -----
def slot_btn_open_file(self):
try:
self.spec_now = np.zeros(0)
self.spec_last = np.zeros(0)
fileName_choose, filetype = QFileDialog.getOpenFileName(self,
"Open the spectrum",
"./", # original path
"txt (*.txt);;chn (*.Chn);;spe (*.Spe);;mca (*.mca);;tka (*.TKA);;All Files (*)"
) # filetype filter
if fileName_choose == "":
return
gr = geRWIO(fileName_choose,filetype.split(" ")[0])
self.spec_now = gr.spec
self.spec_ori = self.spec_now+0
self.flag_open_file = True
if filetype.split(" ")[0]!="txt" and filetype.split(" ")[0]!="tka":
self.energy = gr.energy_list
self.canvas_update()
self.cmd_out("DO: Open the spectrum: "+fileName_choose)
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
# ----- open the spectrum file -----
def slot_btn_save_file(self):
try:
if len(self.spec_now)==0:
QMessageBox.warning(self,
"Error!",
"There is no spectrum")
return
file_path,file_type = QFileDialog.getSaveFileName(self, "Save the spectrum",
"./spectrum",
"Text Files (*.txt);;all files(*.*)")
if file_path == "":
# print("\n取消选择")
return
np.savetxt(file_path, self.spec_now)
self.cmd_out("DO: Save spectrum at " + file_path)
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
# ----- open the energy file -----
def slot_btn_open_energy_file(self):
try:
fileName_choose, filetype = QFileDialog.getOpenFileName(self,
"Choose energy file",
"./", # 起始路径
"All Files (*);;Text Files (*.txt)") # 设置文件扩展名过滤,用双分号间隔
if fileName_choose == "":
# print("\n取消选择")
return
# label show
self.energy = np.loadtxt(fileName_choose)
self.flag_energy = True
# self.canvas_update()
self.cmd_out("DO: Open the energy file "+fileName_choose)
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
# ===================================================================================
# 6 SLOT Evaluation
# 1.calibration
def slot_btn_eva_cali_linear_do(self):
try:
if len(self.spec_now)==0:
QMessageBox.warning(self,
"Error!",
"Please choose spectrum")
return
peak1 = [int(self.let_eva_cali_linear_p1.text()),float(self.let_eva_cali_linear_e1.text())]
peak2 = [int(self.let_eva_cali_linear_p2.text()), float(self.let_eva_cali_linear_e2.text())]
k = (peak2[1]-peak1[1])/(peak2[0]-peak1[0])
b = peak1[1]-k*peak1[0]
self.energy_curve_linear = np.zeros(len(self.spec_now))
for i in range(len(self.spec_now)):
self.energy_curve_linear[i] = k*i+b
info = "Finish the FWHM calibration\n"
info = info + "=============\n"
info = info + "The curve is: E = k*Chan+b\n"
info = info + "=============\n"
info = info+"k: "+str(k)+"\n" + "b: "+str(b)+"\n" + "=============\n"
self.info_out(info)
self.canvas_update_energy_curve(self.energy_curve_linear)
self.cmd_out("DO: Finish the linear energy curve's calibration")
pass
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
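    # Worked example (hypothetical peak positions): with channel 100 matched to
    # 121.78 keV and channel 800 to 1332.5 keV, the two-point calibration above
    # gives k = (1332.5 - 121.78) / (800 - 100) ≈ 1.7296 keV/channel and
    # b = 121.78 - 1.7296 * 100 ≈ -51.18 keV, so E ≈ 1.7296 * Chan - 51.18.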
def slot_btn_eva_cali_linear_save(self):
try:
if len(self.energy_curve_linear)==0:
QMessageBox.warning(self,
"Error!",
"Please calibration first")
return
file_path,filetype = QFileDialog.getSaveFileName(self, "Save the energy curve","C:/Users/Administrator/Desktop/energy_linear",
"Text Files (*.txt);;all files(*.*)")
print(file_path)
if file_path=="":
# print("\n取消选择")
return
np.savetxt(file_path,self.energy_curve_linear)
self.cmd_out("DO: Save the linear energy curve at "+file_path)
pass
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_eva_cali_nonlinear_do(self):
try:
if len(self.spec_now)==0:
QMessageBox.warning(self,
"Error!",
"Please choose spectrum")
return
p1, e1 = int(self.let_eva_cali_nonlinear_p1.text()), float(self.let_eva_cali_nonlinear_e1.text())
p2, e2 = int(self.let_eva_cali_nonlinear_p2.text()), float(self.let_eva_cali_nonlinear_e2.text())
p3, e3 = int(self.let_eva_cali_nonlinear_p4.text()), float(self.let_eva_cali_nonlinear_e3.text())
arr1 = | np.array([p1**2,p2**2,p3**2]) | numpy.array |
#This is the basis for an N-Body Code created by
#<NAME> and <NAME>
import numpy as np
import matplotlib.pyplot as plt
GravitationalConstant = 6.67408e-11
class body: # universal body container
def __init__ (self, mass, position, velocity): # body creation
self.m = mass
self.x = position
self.v = velocity
        self.a = np.array([0., 0., 0.])  # float dtype so the in-place += with float accelerations works
def distance (self, otherbody): # distance to another body calculator
r = self.x - otherbody.x
r = np.sqrt(np.dot(r, r))
return r
def gravitate (self, otherbody): # two body gravitation acceleration calculator
r= self.x - otherbody.x
r = r*((np.dot(r,r))**(-1.5))
self.a += -GravitationalConstant*otherbody.m*r
otherbody.a += GravitationalConstant*self.m*r
def resetacc(self): # acceleration reset procedure
        self.a = np.zeros(3)  # reset to a float array (a plain list would break the += in gravitate)
def velocity2(self): # V^2
return np.dot(self.v,self.v)
def magnitude(vector):
return np.sqrt(np.dot(vector,vector))
def orbitalcalc(ecc, perihelium, msun): # orbit parameters
return np.sqrt(GravitationalConstant*msun*(1+ecc)/(perihelium))
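# Quick check of orbitalcalc (values taken from the 'kozai' setup below): for
# ecc=0.5, perihelium=1.496e10 m and msun=1.989e30 kg the perihelion speed is
# sqrt(G*M*(1+e)/r_p) ≈ 1.15e5 m/s, i.e. roughly 115 km/s.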
def initialiser(total_time, steps_per_year, mode, integration_scheme, par1 ='+'):
global N,SS,dt,endtime,eccen #total num of bodies and the solar system
if mode=='kozai':
N=3
vel=orbitalcalc(0.5, 1.496e10, 1.989e30)
SS=np.array([body(1.989e30,np.array([0.,0.,0.]),np.array([0.,0.,0.])),
body(1.989e29,np.array([1.496e11,0.,0.]),np.array([0.,2.979e4,0.])),
body(5.972e24,np.array([-1.496e10,0,0]),np.array([0,-vel*0.5,vel*(np.sqrt(3)/2)]))])
elif mode=='cluster1':
N=30
#Define parsec in metres
parsec=3.0857e16
solarmass=1.989e30
radius=0.25*parsec
SS=[]
for i in range(N):
#Random position in spherical coordinates
pos=np.random.random_sample((3,))*np.array([radius,np.pi,2*np.pi])
posx=pos[0]*np.sin(pos[1])* | np.cos(pos[2]) | numpy.cos |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Program for
Created on %(date)
@author : trismonock
@mail : <EMAIL>
"""
# import standard modules
import os
os.environ["OPENBLAS_NUM_THREADS"] = "3" # openblas thread number (problem for Python 3)
import numpy as np
import glob
from numpy.linalg import inv
# get and define the working directory
base_dir = os.getcwd() + "/"
os.chdir(base_dir)
# define dirs
input_dir = os.path.realpath(base_dir + "../../SIM_RET/OUTPUT") + "/"
output_dir = os.path.realpath(base_dir + "../OUTPUT") + "/"
# create dir
try:
os.makedirs(output_dir)
except OSError:
pass
# define cases
stat_list = glob.glob(input_dir + "statistics*.dat")
stat_list = np.sort(stat_list)
nfiles = len(stat_list)
print(stat_list)
print(nfiles)
# loop over pixels
for i in range(nfiles):
# print statement
print("Info | Executing error analysis pixel id : ", i+1)
# define init string
kmat_list = glob.glob(input_dir + "kmat_" + str(i+1).zfill(5) + "_*.dat")
# define number of iteration
nkmat = len(kmat_list)
# define filename
kmat_file = input_dir + "kmat_" + str(i+1).zfill(5) + "_" + str(nkmat).zfill(2) + ".dat"
# open and read data
data = | np.loadtxt(kmat_file) | numpy.loadtxt |
"""
Colorizing the text mask.
Change the original code to Python3 support and simplifed the code structure.
Original project: https://github.com/ankush-me/SynthText
Author: <NAME>
Date: 2015
"""
import cv2
import numpy as np
import copy
import matplotlib.pyplot as plt
import scipy.interpolate as si
import scipy.ndimage as scim
import scipy.ndimage.interpolation as sii
import os
import os.path as osp
import pickle as cp
from PIL import Image
import random
from . import poisson_reconstruct
class Layer(object):
def __init__(self, alpha, color):
# alpha for the whole image:
assert alpha.ndim == 2
self.alpha = alpha
[n,m] = alpha.shape[:2]
color = np.atleast_1d( | np.array(color) | numpy.array |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2022 Madureira, Brielen
# SPDX-License-Identifier: MIT
"""
An object to load datasets and create the datapoints for each set.
It loads probes from JSON files and representations from h5 files
and then constructs datasets for the probing classifier task.
"""
import json
import pickle
from collections import Counter, defaultdict
import h5py
import numpy as np
from torch.utils.data import Dataset
from aux import (get_reps_path, get_probes_path, get_test_lens_path,
get_embs_path)
from tasks import get_task_labels
# fixed number of turns in all train/valid dialogues
VISDIAL_LEN = 11
class ProbingDataset(Dataset):
"""Build a dataset split."""
def __init__(self, params, split):
"""
Args:
params (dataclass): All parameters of the experiment.
split (str): Train, valid or test.
"""
self.params = params
self.split = split
self.labels, self.label_names = self._define_labels()
self.representations = self._load_representations()
self.label_counter = Counter()
self.datapoints = {}
self._create_datapoints()
def __len__(self):
"""Return number of datapoints."""
return len(self.datapoints)
def __getitem__(self, index):
"""Retrieve a datapoint given its index."""
dialogue_id, _, sent_id, turn, label = self.datapoints[index]
sent_embedding = self.id2sent[sent_id]
representation = self.representations[dialogue_id, turn]
return (index, representation, sent_embedding, label)
def _define_labels(self):
"""Get labels and their names according to main task."""
return get_task_labels(self.params.bot, self.params.task)
def _load_lens(self):
"""Return dict of dialogue lens, which is constant if not test set."""
if self.split != 'test':
return defaultdict(lambda: VISDIAL_LEN)
path = get_test_lens_path()
with open(path, 'r') as f:
lens = {}
for line in f.readlines():
idx, length = line.strip('\n').split('\t')
lens[idx] = int(length)
return lens
def _load_representations(self):
"""Load dialogue representations."""
path = get_reps_path(self.params, self.split)
name = f'{self.split}_dialogue_representations'
representations = np.array(h5py.File(path, 'r').get(name))
# Define control task
if self.params.control_task == 'rand-reps' and self.split == 'train':
# replace representations by random vectors
np.random.seed(self.params.random_seed)
r_mean = | np.mean(representations) | numpy.mean |
import unittest
import numpy as np
from . import plot
import funcsfa
class TestInvalidInputs(unittest.TestCase):
def setUp(self):
self.rand = np.random.RandomState(1968486074)
self.n_factors = 9
self.f = funcsfa.SFA()
self.n_samples = 221
self.n_features = 37
self.X_a = self.rand.normal(0, 1, (self.n_samples, 30))
self.X_b = self.rand.normal(0, 1, (self.n_samples, 7))
self.data_one = funcsfa.DataMatrix(self.X_a)
self.data_two = funcsfa.StackedDataMatrix([
funcsfa.DataMatrix(self.X_a),
funcsfa.DataMatrix(self.X_b)])
def test_l1_penalty_length_one_dt(self):
self.f.fit(self.data_one, self.n_factors, max_iter=0, l1=0.0)
self.f.fit(self.data_one, self.n_factors, max_iter=0, l1=0.0, l2=0.0)
self.f.fit(self.data_one, self.n_factors, max_iter=0, l1=[0.0], l2=0.0)
self.f.fit(self.data_one, self.n_factors, max_iter=0, l1=[0.0],
l2=[0.0])
with self.assertRaises(Exception):
self.f.fit(self.data_one, self.n_factors, max_iter=0,
l1=[0.0, 0.1])
with self.assertRaises(Exception):
self.f.fit(self.data_one, self.n_factors, max_iter=0,
l1=[])
with self.assertRaises(Exception):
self.f.fit(self.data_one, self.n_factors, max_iter=0,
l1=[0.1, 0.2], l2=[0.1])
with self.assertRaises(Exception):
self.f.fit(self.data_one, self.n_factors, max_iter=0,
l1=[0.1, 0.2], l2=0.1)
def test_l2_penalty_length_one_dt(self):
self.f.fit(self.data_one, self.n_factors, max_iter=0, l1=0.0, l2=[0.0])
with self.assertRaises(Exception):
self.f.fit(self.data_one, self.n_factors, max_iter=0,
l2=[0.0, 0.1])
with self.assertRaises(Exception):
self.f.fit(self.data_one, self.n_factors, max_iter=0,
l2=[])
with self.assertRaises(Exception):
self.f.fit(self.data_one, self.n_factors, max_iter=0,
l2=[0.1, 0.2], l1=[0.1])
with self.assertRaises(Exception):
self.f.fit(self.data_one, self.n_factors, max_iter=0,
l2=[0.1, 0.2], l1=0.1)
def test_more_factors_than_features(self):
with self.assertRaises(Exception):
self.f.fit(self.data_two, self.data_two.dt_n_features[0]+1,
max_iter=0)
with self.assertRaises(Exception):
self.f.fit(self.data_two, self.data_two.dt_n_features[1]+1,
max_iter=0)
def test_invalid_transform(self):
f = funcsfa.SFA()
with self.assertRaises(Exception):
f.transform(self.data_one)
f.fit(self.data_one, self.n_factors, max_iter=10)
f.transform(self.data_one)
f.transform(self.data_one)
with self.assertRaises(Exception):
f.transform(self.data_two)
with self.assertRaises(Exception):
f.transform(self.data_two.data)
class TestSingleDatatypeReproduceRandom(unittest.TestCase):
def setUp(self):
self.n_samples = 400
self.n_features = 2000
self.n_factors = 10
self.rand = np.random.RandomState(1968486074)
self.B = self.rand.normal(0, 1, (self.n_features, self.n_factors))
Zvar = np.linspace(10, 1, self.n_factors)
Zvar = Zvar / np.mean(Zvar)
self.Z = self.rand.normal(0, np.sqrt(Zvar),
(self.n_samples, self.n_factors))
self.X = np.dot(self.Z, self.B.T)
self.data = funcsfa.DataMatrix(self.X)
self.f = funcsfa.SFA()
def test_init_full_factors_output_shapes(self):
Z_estimated = self.f.fit_transform(self.data, self.n_factors,
max_iter=0)
self.assertEqual(Z_estimated.shape, (self.n_samples, self.n_factors))
self.assertEqual(self.f.coefficients[0].shape,
(self.n_features, self.n_factors))
assert not np.any(np.isnan(Z_estimated))
assert not np.any(np.isnan(self.f.coefficients[0]))
def test_init_full_factors_reconstruction(self):
Z_estimated = self.f.fit_transform(self.data, self.n_factors,
max_iter=0)
self.assertAlmostEqual(self.f.reconstruction_error, 0.0)
X_reconstructed = (np.dot(Z_estimated, self.f.coefficients[0].T) +
np.mean(self.X, 0, keepdims=True))
np.testing.assert_allclose(self.X, X_reconstructed)
def test_init_full_factors_constraints(self):
Z_estimated = self.f.fit_transform(self.data, self.n_factors,
max_iter=0)
np.testing.assert_allclose(1, np.mean(Z_estimated ** 2))
def test_init_part_factors_output_shapes(self):
n_factors = self.n_factors // 2
Z_estimated = self.f.fit_transform(self.data, n_factors, max_iter=0)
self.assertEqual(Z_estimated.shape, (self.n_samples, n_factors))
self.assertEqual(self.f.coefficients[0].shape,
(self.n_features, n_factors))
assert not np.any(np.isnan(Z_estimated))
assert not np.any(np.isnan(self.f.coefficients[0]))
def test_init_part_factors_constraints(self):
n_factors = self.n_factors // 2
Z_estimated = self.f.fit_transform(self.data, n_factors, max_iter=0)
np.testing.assert_allclose(1, np.mean(Z_estimated ** 2))
Z_estimated2 = self.f.transform(self.data)
np.testing.assert_allclose(1, | np.mean(Z_estimated2 ** 2) | numpy.mean |
import numpy as np
def IoU(window1: np.ndarray, window2: np.ndarray) -> np.ndarray:
"""
:param window1: shape=(N, 2)
:param window2: shape=(N, 2)
:return IoUs: shape=(N,)
"""
    intersection_size = np.maximum(
        0,
        np.minimum(window1[:,1], window2[:,1]) - np.maximum(window1[:,0], window2[:,0])
    )
    # 1-D interval IoU: overlap length divided by the combined (union) length
    union_size = (window1[:,1] - window1[:,0]) + (window2[:,1] - window2[:,0]) - intersection_size
    return intersection_size / union_size
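# Example: IoU(np.array([[0, 10]]), np.array([[5, 15]])) -> array([0.33333333]),
# since the two windows overlap by 5 units over a combined span of 15.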
#!/usr/bin/env python
import matplotlib.pyplot as plt
import librosa.display
import numpy as np
import subprocess
import librosa
import pysrt
import sys
import os
import re
DIRNAME = os.path.dirname(os.path.realpath(__file__))
TRAIN_DIR = os.path.join(DIRNAME, 'training')
FREQ = 16000 # Audio frequency
N_MFCC = 13
HOP_LEN = 512.0 # Num of items per sample
# 1 item = 1/16000 seg = 32 ms
ITEM_TIME = HOP_LEN/FREQ
if not os.path.exists(TRAIN_DIR):
print("missing training data in directory:", TRAIN_DIR)
sys.exit(1)
# Convert timestamp to seconds
def timeToSec(t):
total_sec = float(t.milliseconds)/1000
total_sec += t.seconds
total_sec += t.minutes*60
total_sec += t.hours*60*60
return total_sec
# Return timestamp from cell position
def timeToPos(t, freq=FREQ, hop_len=HOP_LEN):
return round(timeToSec(t)/(hop_len/freq))
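# Example (assuming pysrt's SubRipTime(hours, minutes, seconds, milliseconds)):
# a subtitle cue at 00:01:02,500 is 62.5 s, and with HOP_LEN/FREQ = 0.032 s per
# item, timeToPos(pysrt.SubRipTime(0, 1, 2, 500)) == round(62.5 / 0.032) == 1953.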
"""
Uses ffmpeg to transcode and extract audio from movie files in the training
directory. Function returns a list of tuples of the .wav files and corresponding
.srt files for processing.
"""
def transcode_audio(dir=TRAIN_DIR):
files = os.listdir(dir)
    p = re.compile(r'.*\.(mkv|avi|mp4)$')  # match common movie container extensions
files = [ f for f in files if p.match(f) ]
training = []
for f in files:
name, extension = os.path.splitext(f)
input = os.path.join(dir, f)
output = os.path.join(dir, name + '.wav')
srt = os.path.join(dir, name + '.srt')
if not os.path.exists(srt):
print("missing subtitle for training:", srt)
sys.exit(1)
training.append((output, srt))
if os.path.exists(output):
continue
print("Transcoding:", input)
command = "ffmpeg -y -i {0} -ab 160k -ac 2 -ar {2} -vn {1}".format(input, output, FREQ)
code = subprocess.call(command, stderr=subprocess.DEVNULL, shell=True)
if code != 0:
raise Exception("ffmpeg returned: {}".format(code))
return training
"""
Extracts the features and labels from the .wav and .srt file. The audio is
processed using MFCC. Returns a tuple where the first element is the MFCC data
and the second argument is the labels for the data.
"""
def extract_features(files=None):
if files is None:
files = transcode_audio()
audio = []
labels = []
for (wav, srt) in files:
print("Processing audio:", wav)
y, sr = librosa.load(wav, sr=FREQ)
mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=int(HOP_LEN), n_mfcc=int(N_MFCC))
label = extract_labels(srt, len(mfcc[0]))
audio.append(mfcc)
labels.append(label)
return audio, labels
"""
Processes a .srt file and returns a numpy array of labels for each sample. If
there is a subtitle at the i'th sample, there is a 1 at position i, else 0.
"""
def extract_labels(srt, samples):
subs = pysrt.open(srt)
labels = | np.zeros(samples) | numpy.zeros |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ...pvtpy.black_oil import Pvt,Oil,Water,Gas
from scipy.optimize import root_scalar
from .inflow import OilInflow, GasInflow
from ...utils import intercept_curves
from typing import Union
## Incompressible pressure drop
def potential_energy_change(
z1:Union[int,float]=None,
z2=None,
delta_z=None,
length=None,
ge=1,
angle=None,
inc=None,
p1=0):
"""potential_energy_change [ Δp PE accounts for the pressure change due to the weight of the column of fluid (the hydrostatic head); it
will be zero for flow in a horizontal pipe.
In this equation, Δz is the difference in elevation between positions 1 and 2, with z increasing upward. θ
is defined as the angle between horizontal and the direction of flow. Thus, θ is +90° for upward, vertical
flow, 0° for horizontal flow, and –90° for downward flow in a vertical well (Figure 7-4). For flow in a
straight pipe of length L with flow direction θ,]
Parameters
----------
z1 : [type], optional
[description], by default None
z2 : [type], optional
[description], by default None
delta_z : [type], optional
[description], by default None
length : [type], optional
[description], by default None
ge : int, optional
[description], by default 1
angle : [type], optional
[description], by default None
inc : [type], optional
[description], by default None
p1 : int, optional
[description], by default 0
Returns
-------
[type]
[description]
"""
# Assert height difference types
if delta_z is None:
if length is None:
assert isinstance(z1,(float,int,np.ndarray,np.int64,np.float64)) and isinstance(z2,(float,int,np.ndarray,np.int64,np.float64)), f"{type(z1)} {type(z2)}"
z1 = np.atleast_1d(z1)
z2 = np.atleast_1d(z2)
#assert z1.shape == (1,) and z2.shape == (1,)
delta_z = z1-z2
else:
assert isinstance(length,(float,int,np.ndarray,np.int64,np.float64))
length = np.atleast_1d(length)
#assert length.shape == (1,)
if angle is None:
assert isinstance(inc,(float,int,np.ndarray,np.int64,np.float64))
inc = np.atleast_1d(inc)
assert inc <= 90 and inc >= -90
sign = np.sign(inc)
angle = (90 - np.abs(inc)) * sign
else:
# Assert angle between -90 and 90
assert isinstance(angle,(float,int,np.ndarray,np.int64,np.float64))
angle = np.atleast_1d(angle)
assert angle <= 90 and angle >= -90
delta_z = length * np.sin(np.radians(angle))
else:
assert isinstance(delta_z,(float,int,np.ndarray,np.int64,np.float64))
delta_z = np.atleast_1d(delta_z)
#assert delta_z.shape == (1,)
#Assert ge be positive
assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0, f"{ge} {type(ge)} not allowed"
#Calculate Delta P
delta_p = 0.433 * ge * delta_z
#Calculate P2
p2 = p1 + delta_p
return delta_p, p2
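# Worked example: 1000 ft of fresh water (ge=1) adds 0.433 psi/ft of hydrostatic
# head, so potential_energy_change(delta_z=1000, ge=1, p1=0) returns
# (array([433.]), array([433.])).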
def kinetic_energy_change(d1=None,d2=None, ge=1,rate=None,p1=0):
"""
Δp KE is the pressure drop resulting from a change in the velocity of the fluid between positions 1 and 2.
It will be zero for an incompressible fluid unless the cross-sectional area of the pipe is different at the
two positions of interest.
Petroleum Production Systems, Economides. Chapter 7 7.2.3.2. Δp KE, the Pressure Drop Due to Kinetic Energy Change. Page 172
"""
assert isinstance(d1,(float,int,np.ndarray,np.int64,np.float64)) and isinstance(d2,(float,int,np.ndarray,np.int64,np.float64))
d1 = np.atleast_1d(d1)
d2 = np.atleast_1d(d2)
#Assert Specifi Gravity be positive
assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0
ge = np.atleast_1d(ge)
# Rate in bbl/d
assert isinstance(rate,(float,int,np.ndarray,np.int64,np.float64)) and rate>=0
rate = np.atleast_1d(rate)
#Estimate Density in lb/ft3
rho = 62.4 * ge
#Estimate delta Pressure in psi
delta_p = 1.53e-8 * np.power(rate,2) * rho * ((1/np.power(d1,4))-(1/np.power(d2,4)))
p2 = p1 + delta_p
return delta_p, p2
def reynolds_number(rate,rho,d,mu):
"""
Reynolds Number where q is in bbl/d, ρ in lb m /ft 3 , D in in., and μ in cp.
"""
nre = (1.48 * rate * rho) / (d * mu)
return nre
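# Worked example (hypothetical conditions): q=1000 bbl/d, rho=55 lbm/ft3,
# d=2.99 in and mu=2 cp give NRe = 1.48*1000*55/(2.99*2) ≈ 13,612, comfortably
# in the turbulent regime.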
def frictional_pressure_drop(
rate=None,
epsilon=0.001,
ge=1,
d=None,
mu=1,
length=None):
# Rate in bbl/d
assert isinstance(rate,(float,int,np.ndarray,np.int64,np.float64)) and rate>=0
rate = np.atleast_1d(rate)
# pipe relative roughness
assert isinstance(epsilon,(float,int,np.ndarray,np.int64,np.float64))
epsilon = np.atleast_1d(epsilon)
#Assert Specifi Gravity be positive
assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0
ge = np.atleast_1d(ge)
assert isinstance(d,(float,int,np.ndarray,np.int64,np.float64))
d = np.atleast_1d(d)
assert isinstance(mu,(float,int,np.ndarray,np.int64,np.float64))
mu = np.atleast_1d(mu)
assert isinstance(length,(float,int,np.ndarray,np.int64,np.float64))
length = np.atleast_1d(length)
#Estimate Density in lb/ft3
rho = 62.4 * ge
#Reynolds Number
nre = reynolds_number(rate,rho,d,mu)
#Friction Factor
if nre == 0:
ff = 0
else:
ff = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)
#Velocity ft/s
u = (4*rate*5.615)/(np.pi*np.power(d/12,2)*86400)
delta_p = (2 * ff * rho * np.power(u,2) * length)/(32.17 * (d/12) * 144)
delta_p *= -1
return delta_p
def one_phase_pressure_profile(
p1=0,
ge=1,
epsilon=0.001,
md=None,
tvd=None,
d = None,
rate = None,
mu=None,
backwards=1
):
assert isinstance(md,(int,float,list,np.ndarray))
md = np.atleast_1d(md)
if tvd is None:
tvd = md
else:
assert isinstance(tvd,(int,float,list,np.ndarray))
tvd = np.atleast_1d(tvd)
assert isinstance(d,(int,float,list,np.ndarray))
if isinstance(d,(int,float)):
d = np.full(md.shape,d)
else:
d = np.atleast_1d(d)
assert isinstance(rate,(int,float, np.ndarray))
rate = np.atleast_1d(rate)
assert isinstance(mu,(int,float, np.ndarray))
mu = np.atleast_1d(mu)
assert isinstance(p1,(int,float, np.ndarray))
p1 = np.atleast_1d(p1)
assert isinstance(ge,(int,float, np.ndarray))
ge = np.atleast_1d(ge)
assert isinstance(epsilon,(int,float, np.ndarray))
epsilon = np.atleast_1d(epsilon)
assert md.shape[0] == tvd.shape[0] == d.shape[0]
n = md.shape[0]
#Create arrays
pressure = np.zeros(n)
ppe = np.zeros(n)
pke = np.zeros(n)
pf = np.zeros(n)
delta_p = np.zeros(n)
gradient = np.zeros(n)
pressure[0] = p1
for i in range(1,n):
#Potential Energy Change
ppe[i], _ = potential_energy_change(
z1=tvd[i-1],
z2=tvd[i],
ge= ge,
)
#Kinetic Energy Change
pke[i], _ = kinetic_energy_change(
d1=d[i-1],
d2=d[i],
rate=rate,
ge=ge,
)
#Frictional Pressure drop
pf[i] = frictional_pressure_drop(
rate=rate,
epsilon=epsilon,
ge=ge,
d=d[i],
mu=mu,
length=np.abs(md[i-1]-md[i])
) * backwards
delta_p[i] = ppe[i] + pke[i] + pf[i]
pressure[i] = pressure[i-1] + delta_p[i]
gradient[i] = (pressure[i] - pressure[i-1])/np.abs(tvd[i] - tvd[i-1])
# Create dataframe
pressure_profile = pd.DataFrame({
'md':md,
'tvd':tvd,
'diameter':d,
'pressure':pressure,
'ppe': ppe,
'pke': pke,
'pf' : pf,
'delta_p': delta_p,
'gradient': gradient
}).set_index('md')
p2 = pressure[-1]
return pressure_profile, p2
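# A minimal usage sketch (hypothetical well data): a vertical well surveyed every
# 1000 ft, 2.99-in tubing, 1000 bbl/d of a 0.85-SG, 2-cp liquid and a 200-psi
# wellhead pressure:
#   md = np.linspace(0, 8000, 9)
#   profile, pwf = one_phase_pressure_profile(p1=200, ge=0.85, md=md, tvd=md,
#                                             d=2.99, rate=1000, mu=2)
# `profile` is a DataFrame indexed by md and `pwf` the flowing bottom-hole pressure.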
## Gas Outflow functions
def gas_pressure_profile_correlation(thp,sg,depth):
assert isinstance(thp,(float,int,np.ndarray,np.int64,np.float64))
thp = np.atleast_1d(thp)
assert thp.ndim == 1
assert isinstance(sg,(float,int,np.ndarray,np.int64,np.float64))
sg = np.atleast_1d(sg)
assert sg.shape == (1,)
assert isinstance(depth,(list,float,int,np.ndarray))
depth = np.atleast_1d(depth)
    assert depth.ndim == 1
pwf = thp*np.exp(3.47e-5*depth)
return pwf
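# Worked example: with thp=1000 psi and depth=10000 ft the correlation gives
# pwf = 1000*exp(3.47e-5*10000) ≈ 1415 psi (sg is only validated here and does
# not enter the formula).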
def gas_pressure_profile(
md = None,
inc = None,
thp = None,
rate = None,
gas_obj = None,
di=2.99,
surf_temp=80,
temp_grad=1,
epsilon = 0.0006,
tol = 0.05,
max_iter=20):
"""
To calculate the pressure drop in a gas well, the compressibility of the fluid must be considered. When
the fluid is compressible, the fluid density and fluid velocity vary along the pipe, and these variations
must be included when integrating the mechanical energy balance equation.
Petroleum Production Systems, Economides. Chapter 7 7.3. Single-Phase Flow of a Compressible, Newtonian Fluid. Page 175
"""
# Assert the right types and shapes for input
assert isinstance(md, (np.ndarray,pd.Series))
md = np.atleast_1d(md)
assert md.ndim ==1
assert isinstance(inc, (int,float,np.ndarray,pd.Series))
if isinstance(inc,np.ndarray):
assert inc.shape == md.shape
else:
inc = np.full(md.shape,inc)
angle = np.radians(90 - inc)
assert isinstance(thp, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'
thp = np.atleast_1d(thp)
assert thp.shape == (1,)
assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None
assert isinstance(di, (int,float,np.ndarray))
if isinstance(di,np.ndarray):
assert di.shape == md.shape
else:
di = np.full(md.shape,di)
assert isinstance(rate, (int,float,np.ndarray))
rate = np.atleast_1d(rate)
assert rate.shape == (1,)
assert gas_obj.sg is not None
#Create the variables
pressure_profile = np.zeros(md.shape)
temperature_profile = np.zeros(md.shape)
pressure_gradient = np.zeros(md.shape)
pressure_profile[0] = thp
temperature_profile[0] = surf_temp
interations = np.zeros(md.shape)
if gas_obj.chromatography is not None:
df_rho = gas_obj.chromatography.get_rhog(p=thp,t=surf_temp, rhog_method='real_gas')
else:
df_rho = gas_obj.pvt.interpolate(thp,property='rhog')
grad_guess = df_rho['rhog'].values*(0.433/62.4)
#Loop over depth
for i in range(1,md.shape[0]):
err = tol + 0.01
dz = np.sin(angle[i])*(md[i]-md[i-1])
gas_sg = gas_obj.sg
it = 0
while err>= tol and it <= max_iter:
p_guess = grad_guess*(md[i]-md[i-1])*np.sin(angle[i]) + pressure_profile[i-1]
#Interpolate pvt
df_pvt = gas_obj.pvt.interpolate(p_guess)
#Reynolds Number
#nre = (4*28.97*gas_obj.sg*rate*14.7)/(np.pi*di[i]*df_pvt['mug'].values*10.73*520)
nre = 20.09*(gas_sg*rate)/(di[i]*df_pvt['mug'].values)
#Friction Factor
friction = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)
#Temperature
temperature_profile[i] = dz * (temp_grad/100) + temperature_profile[i-1]
#S
s = (-0.0375*gas_obj.sg*dz)/(df_pvt['z'].values*(temperature_profile[i]+460))
#Calculate next pressure by parts for easily read
a = np.exp(-s) * np.power(pressure_profile[i-1],2)
b = (friction*np.power(df_pvt['z'].values*(temperature_profile[i]+460)*rate,2))/(np.sin(angle[i])*np.power(di[i],5))
c = 1 - np.exp(-s)
p_new = np.sqrt(a - (2.685e-3*b*c))
grad_new = (p_new - pressure_profile[i-1])/dz
err = np.abs(grad_guess-grad_new)/grad_new
grad_guess = grad_new
it +=1
pressure_gradient[i] = grad_new
pressure_profile[i] = p_new
interations[i] = it
df_dict = {
'pressure':pressure_profile,
'pressure_gradient': pressure_gradient,
'temperature': temperature_profile,
'iterations': interations
}
df = pd.DataFrame(df_dict, index = md)
pwf = pressure_profile[-1]
return df, pwf
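# A minimal usage sketch, assuming `gas` is a Gas object with its PVT table and
# specific gravity already set (the names below are illustrative only):
#   md = np.linspace(0, 10000, 101)
#   profile, pwf = gas_pressure_profile(md=md, inc=0, thp=500, rate=2000,
#                                       gas_obj=gas, di=2.99)
# `profile` holds pressure, gradient and temperature versus depth and `pwf` is
# the flowing bottom-hole pressure for a vertical well (inc=0).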
def gas_upward_pressure(
md = None,
inc = None,
pwf = None,
rate = None,
gas_obj = None,
di=2.99,
surf_temp=80,
temp_grad=1,
epsilon = 0.0006,
tol = 0.05,
max_iter=20,
guess=None,
grad_guess = [0.02,0.05]
):
if guess is None:
grad = np.atleast_1d(grad_guess)
delta_h = np.abs(md[-1] - md[0])
guess = pwf - grad * delta_h
else:
assert isinstance(guess,(list,np.ndarray))
guess = np.atleast_1d(guess)
def solve(x):
_,_pwf = gas_pressure_profile(
md = md,
inc = inc,
thp = x,
rate = rate,
gas_obj = gas_obj,
di=di,
surf_temp=surf_temp,
temp_grad=temp_grad,
epsilon = epsilon,
tol = tol,
max_iter=max_iter,
)
return pwf - _pwf
sol = root_scalar(solve, x0=guess[0],x1=guess[1])
return sol.root
def gas_outflow_curve(
md = None,
inc = None,
thp = None,
gas_obj = None,
rate=None,
min_rate=100,
max_rate=8000,
n_rate=20,
di=2.99,
surf_temp=80,
temp_grad=1,
epsilon = 0.0006,
tol = 0.05,
max_iter=20,
operating_point = None,
op_n = 30
):
# Assert the right types and shapes for input
assert isinstance(md, (np.ndarray,pd.Series)) and md.ndim ==1
md = np.atleast_1d(md)
assert isinstance(inc, (int,float,np.ndarray,pd.Series))
if isinstance(inc,np.ndarray):
assert inc.shape == md.shape
else:
inc = np.full(md.shape,inc)
angle = np.radians(90 - inc)
assert isinstance(thp, (int,float,list,np.ndarray))
thp = np.atleast_1d(thp)
assert thp.ndim == 1
assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None
assert isinstance(di, list)
assert isinstance(rate, (int,float,list,np.ndarray,type(None)))
if rate is None:
rate = np.linspace(min_rate,max_rate,n_rate)
else:
rate = np.atleast_1d(rate)
assert rate.ndim == 1
assert gas_obj.sg is not None
pwf = np.zeros(rate.shape[0]*thp.shape[0]*len(di))
thp_arr = np.zeros(pwf.shape)
di_arr = np.zeros(pwf.shape)
gas_arr = np.zeros(pwf.shape)
name_list = []
i = 0
for p in thp:
for d in di:
for q in rate:
_,pwf[i] = gas_pressure_profile(
md = md,
inc = inc,
thp = p,
rate = q,
gas_obj = gas_obj,
surf_temp=surf_temp,
temp_grad=temp_grad,
di=d
)
gas_arr[i] = q
thp_arr[i] = p
di_arr[i] = d
case_name = f'thp-{p}_di-{d}'
name_list.append(case_name)
i += 1
#df = pd.DataFrame(pwf,columns=name_list,index=rate)
arr=np.column_stack((pwf,thp_arr,di_arr))
df = pd.DataFrame(arr,columns=['pwf','thp','di'],index=gas_arr)
df['case'] = name_list
df.index.name = 'gas'
op = pd.DataFrame()
if operating_point is not None:
inflow = operating_point.df
for case in df['case'].unique():
df_case = df[df['case']==case]
points, idx = intercept_curves(inflow['q'],inflow['p'],df_case.index,df_case['pwf'], n=op_n)
points_df = pd.DataFrame(points[[-1],:], columns=['q','p'])
points_df['case'] = case
points_df['idx'] = idx
op = op.append(points_df)
op = op.merge(df.groupby('case').mean(), left_on='case', right_on='case')
return df, op
### Multiphase Pressure Gradients
def flow_regime_plot(
ql=None,
qg=None,
d=2.99,
sg_liquid = 1,
surface_tension=30,
ax=None,
method = 'duns_ros',
**kwargs
):
"""
Plot Flow Regime from Duns and Ros Flow Regime Map
Coordinates extracted from Figure7-10 Duns and Ros Flow Regime Map
https://apps.automeris.io/wpd/
Petroleum Production Systems, Economides. Chapter 7 7.2.3.2. Δp KE, the Pressure Drop Due to Kinetic Energy Change. Page 84
"""
if d is not None:
assert isinstance(d,(int,float,list,np.ndarray,pd.Series))
d = np.atleast_1d(d)
# Estimate Cross section Area [ft2] from diameter [in]
a = np.power((d*0.5)/12,2)*np.pi
if ql is not None:
assert isinstance(ql,(int,float,list,np.ndarray,pd.Series))
ql = np.atleast_1d(ql)
#Liquid velocity. Convert bbl/d to ft3/s then divide area. Result velocity in ft/s
usl = (ql * 5.616 * (1/86400))/a
#Calculate the dimensionless numbers for each phase
nvl = 1.938 * usl * np.power((sg_liquid*62.4)/surface_tension,0.25)
if qg is not None:
assert isinstance(ql,(int,float,list,np.ndarray,pd.Series))
qg = np.atleast_1d(qg)
#Gas velocity. Convert ft3/d to ft3/s then divide area. Result velocity in ft/s
usg = (qg * (1/86400))/a
nvg = 1.938 * usg * np.power((sg_liquid*62.4)/surface_tension,0.25)
if method == 'duns_ros':
fax= ax or plt.gca()
region_1_2 = np.array([
[1.1753722651306362, 0.1082636733874053],
[1.1913061720030635, 0.16102620275609392],
[1.3268047497147244, 0.23950266199874834],
[1.4777148689707504, 0.35154183187529914],
[1.7604108438655526, 0.5228664844415476],
[2.1544346900318843, 0.7880462815669913],
[2.8585141796844757, 1.2358165955824107],
[3.545745842465605, 1.790084628235539],
[5.529553425383406, 3.2470894518548166],
[8.507942799627454, 5.512889788770675],
[16.68100537200059, 11.566937549363251],
[29.76351441631322, 20.43359717856943],
[61.58482110660267, 39.079952122756026],
[41.11829402435837, 27.703123342457815],
[79.53985507023424, 48.93900918477497],
])
region_2_t = np.array([
[53.10631887314356, 0.10543589908346815],
[59.146605445917515, 0.18139306939110614],
[66.7669293918757, 0.36097012876068046],
[80.61813527211957, 0.7674630429274295],
[104.12232560483065, 1.5475873545578884],
[141.92103954525945, 2.7338936055226313],
[270.8622850933671, 5.9684569951223105],
[204.14630347954724, 4.230939172613499],
[340.53655850163904, 7.674630429274299],
[503.2159359259993, 12.195704601594414],
[714.1692874235849, 18.380944176677932],
[922.3851039358485, 23.324701361610806],
])
region_t_3 = np.array([
[92.23851039358486, 0.10684043121253317],
[97.34285811778867, 0.15475873545578891],
[105.53385749880759, 0.24269312356542563],
[115.96514767613999, 0.41204298882016666],
[136.30221830031346, 0.7278953843983147],
[183.29807108324394, 1.2358165955824107],
[263.6650898730361, 2.271547585601246],
[364.25331154496416, 4.120429888201667],
[531.0631887314356, 6.995642156712631],
[714.1692874235849, 11.264816923358868],
[947.5632026539927, 18.139306939110632],
])
fax.plot(region_1_2[:,0],region_1_2[:,1], color='black',linestyle='--')
fax.plot(region_2_t[:,0],region_2_t[:,1], color='black',linestyle='--')
fax.plot(region_t_3[:,0],region_t_3[:,1], color='black',linestyle='--')
fax.set_ylabel('Nvl')
        fax.set_xlabel('Nvg')
fax.set_title('Duns and Ros Flow Regime Map')
fax.set_xlim([0.1,1000])
fax.set_ylim([0.1,100])
annot = kwargs.pop('ann',True)
font = kwargs.pop('fontsize',8)
if annot:
fax.annotate(
f"Region I \n Bubble Flow or \n low-velocity slug flow",
xy = (0.2,0.15),
xycoords='data',
xytext=(0, 0),
textcoords='offset points',
bbox={'boxstyle':'round', 'fc':'0.8'},
fontsize = font
)
fax.annotate(
f"Region II \n High-velocity Flow or \n churn flow",
xy = (2,0.15),
xycoords='data',
xytext=(0, 0),
textcoords='offset points',
bbox={'boxstyle':'round', 'fc':'0.8'},
fontsize = font
)
fax.annotate(
f"Region III \n Annular Flow Pattern",
xy = (300,0.15),
xycoords='data',
xytext=(0, 0),
textcoords='offset points',
bbox={'boxstyle':'round', 'fc':'0.8'},
fontsize = font
)
if ql is not None and qg is not None:
fax.scatter(nvg,nvl,color='blue',marker = "^")
if method == 'taitel_dukler':
fax= ax or plt.gca()
region_E = np.array([
[14.977474763452001, 0.0022033318988979545],
[14.977474763452001, 0.006595844345274293],
[14.977474763452001, 0.04746934676639568],
[14.777148689707504, 0.9165263295637442],
[14.977474763452001, 6.87270243904312],
[14.977474763452001, 15.857064005032758]
])
region_A = np.array([
[0.08858667904100832, 0.0022372323125884317],
[0.08858667904100832, 0.005091596044287256],
[0.0986624843178949, 0.018460289732281962],
[0.11137395078578621, 0.04142593768347061],
[0.1326804749714725, 0.08679099331751502],
[0.1668100537200059, 0.18431459769950134],
[0.21256187881919958, 0.3275265038954424],
[0.30575961084169306, 0.695276382058884],
[0.46415888336127775, 1.2691784682206282],
[0.7336637748600019, 2.019816384578137],
[0.9223851039358476, 2.412109197346714]
])
region_B = np.array([
[0.028585141796844758, 3.4805610999729812],
[0.0531063188731435, 3.5220947122633963],
[0.08623280529014943, 3.517016970779084],
[0.24649769667586238, 3.2292570594299215],
[0.8978760230238888, 2.4455928433916867],
[2.0971883035581533, 1.7556200043179786],
[5.239601353002639, 4.20919831000811],
[10.412232560483055, 7.572933314656229],
[14.579502008614657, 10.657087726496014],
])
region_D = np.array([
[0.26366508987303583, 0.44861391200434203],
[0.30575961084169306, 0.4018483957905594],
[0.4398198780581129, 0.2288467215238852],
[0.5032159359259996, 0.16920697751727592],
[0.5835551032264551, 0.11058672774921392],
[0.6676692939187563, 0.05647578739286295],
[0.6951927961775606, 0.03743162248826758],
[0.7536903980898542, 0.02284801683862376],
[0.7639077845044221, 0.015565548854263186],
[0.7436096708208817, 0.011357807043115235],
[0.7847599703514607, 0.006933286608265855],
[0.7536903980898542, 0.0027304200384003397],
[0.7436096708208817, 0.002162999360197944],
])
fax.plot(region_A[:,0],region_A[:,1], color='black',linestyle='--')
fax.plot(region_B[:,0],region_B[:,1], color='black',linestyle='--')
fax.plot(region_D[:,0],region_D[:,1], color='black',linestyle='--')
fax.plot(region_E[:,0],region_E[:,1], color='black',linestyle='--')
        fax.set_xlabel('Usg [m/s]')
fax.set_ylabel('Usl [m/s]')
fax.set_title('Taitel-Dukler flow regime map')
fax.set_xlim([0.01,100])
fax.set_ylim([0.001,10])
if ql is not None and qg is not None:
fax.scatter(usg*0.3048,usl*0.3048,color='blue',marker = "^")
fax.set_yscale('log')
fax.set_xscale('log')
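# A minimal usage sketch (hypothetical rates): place one operating point of
# 500 bbl/d liquid and 2e5 ft3/d gas in 2.99-in tubing on the Duns and Ros map:
#   fig, ax = plt.subplots()
#   flow_regime_plot(ql=500, qg=2e5, d=2.99, ax=ax, method='duns_ros')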
def hb_correlation(
pressure=None, #Pressure [psi]
temperature=None, #Temperature [F]
liquid_rate=None, # Liquid Flow [bbl/d]
gas_rate=None, # gas flow [kscfd]
ten_liquid=None, #Surface tension dyne/cm2
rho_liquid=None, # density lb/ft3
rho_gas=None, # density lb/ft3
mu_liquid=None, # Viscosity [cp]
mu_gas=None, # Viscosity [cp]
z=1, # Gas compressibility Factor
di=None, # Diameter,
epsilon = 0.0006,
):
"""
The modified Hagedorn and Brown method (mH-B) is an empirical two-phase flow correlation based
on the original work of Hagedorn and Brown (1965). The heart of the Hagedorn-Brown method is a
correlation for liquid holdup; the modifications of the original method include using the no-slip holdup
when the original empirical correlation predicts a liquid holdup value less than the no-slip holdup and
the use of the Griffith correlation (Griffith and Wallis, 1961) for the bubble flow regime.
Petroleum Production Systems, Economides. Chapter 7 7.4.3.1. The Modified Hagedorn and Brown Method Page 187
"""
#Check types and converto to np.ndarray
assert isinstance(pressure,(int,float,np.ndarray,np.float64,np.int64))
pressure = np.atleast_1d(pressure)
assert isinstance(temperature,(int,float,np.ndarray,np.float64,np.int64))
temperature = np.atleast_1d(temperature)
assert isinstance(liquid_rate,(int,float,np.ndarray,np.float64,np.int64))
liquid_rate = np.atleast_1d(liquid_rate)
assert isinstance(gas_rate,(int,float,np.ndarray,np.float64,np.int64))
gas_rate = np.atleast_1d(gas_rate)
assert isinstance(ten_liquid,(int,float,np.ndarray,np.float64,np.int64))
ten_liquid = np.atleast_1d(ten_liquid)
assert isinstance(rho_liquid,(int,float,np.ndarray,np.float64,np.int64))
rho_liquid = np.atleast_1d(rho_liquid)
assert isinstance(rho_gas,(int,float,np.ndarray,np.float64,np.int64))
rho_gas = np.atleast_1d(rho_gas)
assert isinstance(mu_liquid,(int,float,np.ndarray,np.float64,np.int64))
mu_liquid = np.atleast_1d(mu_liquid)
assert isinstance(mu_gas,(int,float,np.ndarray,np.float64,np.int64))
mu_gas = np.atleast_1d(mu_gas)
assert isinstance(z,(int,float,np.ndarray,np.float64,np.int64))
z = np.atleast_1d(z)
assert isinstance(di,(int,float,np.ndarray,np.float64,np.int64))
di = np.atleast_1d(di)
assert isinstance(epsilon,(int,float,np.ndarray,np.float64,np.int64))
epsilon = np.atleast_1d(epsilon)
griffith = False
area = np.power((di*0.5)/12,2)*np.pi
usl = (liquid_rate * 5.615)/(area * 86400)
usg = (4*gas_rate*1000*z*(460+temperature)*14.7)/(86400*pressure*520*np.pi*np.power(di/12,2))
#Mixure Velocity
um = usl + usg
lambda_g = usg / um
lambda_l = 1 - lambda_g
#Check if Buble flow exist
lb = 1.071 - 0.2218 * (np.power(um,2)/(di/12))
if lb < 0.13:
lb = 0.13
if lb > lambda_g:
yl=1-0.5*(1+(um/0.8)-np.sqrt(np.power(1+(um/0.8),2)-4*(usg/0.8)))
griffith=True
else:
#Calculate Dimensionless numbers
nvl= 1.938*usl*np.power(rho_liquid/ten_liquid,0.25) #Liquid Velocity Number
nvg=1.938*usg*np.power(rho_liquid/ten_liquid,0.25) #Gas Velocity Number
nd=120.872*(di/12)*np.power(rho_liquid/ten_liquid,0.5) #Pipe Diameter Number
nl=0.15726*mu_liquid*np.power(1/(rho_liquid * np.power(ten_liquid,3)),0.25)
#cnl=(0.0019+0.0322*nl-0.6642*np.power(nl,2)+4.9951*np.power(nl,3))/(1+10.0147*nl-33.8696*np.power(nl,2)+277.2817*np.power(nl,3)) # original
cnl=(0.0019+0.0505*nl-0.0929*np.power(nl,2)+0.061*np.power(nl,3)) #pengtools
# H
h = (nvl/np.power(nvg,0.575)) * np.power(pressure/14.7,0.1) * (cnl/nd)
#yi/phi ratio
yl_ratio = np.power(((0.0047+1123.32*h-729489.64*np.power(h,2))/(1+1097.1566*h-722153.97*np.power(h,2))),0.5)
#B
b = nvg * np.power(nl,0.38)/np.power(nd,2.14)
#Psi calculated by equation from pengtools
# https://wiki.pengtools.com/index.php?title=Hagedorn_and_Brown_correlation
if b > 0.055:
psi = 2.5714*b + 1.5962
elif b > 0.025:
psi = -533.33*np.power(b,2) + 58.524*b + 0.1171
else:
psi = 27170*np.power(b,3) - 317.52 * np.power(b,2) + 0.5472*b + 0.9999
# Psi calculated from Economides
#psi=(1.0886+69.9473*b-2334.3497*np.power(b,2)+12896.683*np.power(b,3))/(1+53.4401*b-1517.9369*np.power(b,2)+8419.8115*np.power(b,3))
#yl
yl = yl_ratio * psi
if yl < lambda_l:
yl = lambda_l
# Mass flow in lb/d
mass_flow = area * (usl * rho_liquid + usg * rho_gas) * 86400
#Reynolds Number
nre = (2.2e-2 * mass_flow) / ((di/2) * np.power(mu_liquid,yl) * np.power(mu_gas,1-yl))
#Friction Factor
ff = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)
#Average density
rho_avg = yl*rho_liquid + (1-yl)*rho_gas
if griffith:
pressure_gradient = (1/144)*(rho_avg+((ff*np.power(mass_flow,2))/(7.413e10*np.power(di/12,5)*rho_avg*np.power(yl,2))))
else:
pressure_gradient = (1/144)*(rho_avg+((ff*np.power(mass_flow,2))/(7.413e10*np.power(di/12,5)*rho_avg)))
return pressure_gradient
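# A minimal usage sketch with hypothetical fluid properties (all scalar inputs):
#   dpdz = hb_correlation(pressure=1500, temperature=150, liquid_rate=800,
#                         gas_rate=500, ten_liquid=25, rho_liquid=47, rho_gas=5,
#                         mu_liquid=1.5, mu_gas=0.02, z=0.9, di=2.99)
# `dpdz` is the flowing pressure gradient (psi/ft) at those conditions.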
def gray_correlation(
pressure=None, #Pressure [psi]
temperature=None, #Temperature [F]
liquid_rate=None, # Liquid Flow [bbl/d]
gas_rate=None, # gas flow [kscfd]
ten_liquid=None, #Surface tension dyne/cm2
rho_liquid=None, # density lb/ft3
rho_gas=None, # density lb/ft3
mu_liquid=None, # Viscosity [cp]
mu_gas=None, # Viscosity [cp]
z=1, # Gas compressibility Factor
di=None, # Diameter,
epsilon = 0.0006,
):
#Check types and converto to np.ndarray
assert isinstance(pressure,(int,float,np.ndarray,np.float64,np.int64))
pressure = np.atleast_1d(pressure)
assert isinstance(temperature,(int,float,np.ndarray,np.float64,np.int64))
temperature = np.atleast_1d(temperature)
assert isinstance(liquid_rate,(int,float,np.ndarray,np.float64,np.int64))
liquid_rate = np.atleast_1d(liquid_rate)
assert isinstance(gas_rate,(int,float,np.ndarray,np.float64,np.int64))
gas_rate = np.atleast_1d(gas_rate)
assert isinstance(ten_liquid,(int,float,np.ndarray,np.float64,np.int64))
ten_liquid = np.atleast_1d(ten_liquid)
assert isinstance(rho_liquid,(int,float,np.ndarray,np.float64,np.int64))
rho_liquid = np.atleast_1d(rho_liquid)
assert isinstance(rho_gas,(int,float,np.ndarray,np.float64,np.int64))
rho_gas = np.atleast_1d(rho_gas)
assert isinstance(mu_liquid,(int,float,np.ndarray,np.float64,np.int64))
mu_liquid = np.atleast_1d(mu_liquid)
assert isinstance(mu_gas,(int,float,np.ndarray,np.float64,np.int64))
mu_gas = np.atleast_1d(mu_gas)
assert isinstance(z,(int,float,np.ndarray,np.float64,np.int64))
z = | np.atleast_1d(z) | numpy.atleast_1d |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# coding=utf-8
"""
Classes and utilities for operating the wavefront sensors of the MMTO and analyzing the data they produce
"""
import warnings
import pathlib
import numpy as np
import photutils
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from skimage import feature
from scipy import ndimage, optimize
from scipy.ndimage import rotate
from scipy.spatial import cKDTree
import lmfit
import astropy.units as u
from astropy.io import fits
from astropy.io import ascii
from astropy import stats, visualization, timeseries
from astropy.modeling.models import Gaussian2D, Polynomial2D
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.table import conf as table_conf
from astroscrappy import detect_cosmics
from ccdproc.utils.slices import slice_from_string
from .config import recursive_subclasses, merge_config, mmtwfs_config
from .telescope import TelescopeFactory
from .f9topbox import CompMirror
from .zernike import ZernikeVector, zernike_slopes, cart2pol, pol2cart
from .custom_exceptions import WFSConfigException, WFSAnalysisFailed, WFSCommandException
import logging
import logging.handlers
log = logging.getLogger("WFS")
log.setLevel(logging.INFO)
warnings.simplefilter(action="ignore", category=FutureWarning)
table_conf.replace_warnings = ['attributes']
__all__ = ['SH_Reference', 'WFS', 'F9', 'NewF9', 'F5', 'Binospec', 'MMIRS', 'WFSFactory', 'wfs_norm', 'check_wfsdata',
'wfsfind', 'grid_spacing', 'center_pupil', 'get_apertures', 'match_apertures', 'aperture_distance', 'fit_apertures',
'get_slopes', 'make_init_pars', 'slope_diff', 'mk_wfs_mask']
def wfs_norm(data, interval=visualization.ZScaleInterval(contrast=0.05), stretch=visualization.LinearStretch()):
"""
Define default image normalization to use for WFS images
"""
norm = visualization.mpl_normalize.ImageNormalize(
data,
interval=interval,
stretch=stretch
)
return norm
def check_wfsdata(data, header=False):
"""
Utility to validate WFS data
Parameters
----------
data : FITS filename or 2D ndarray
WFS image
Returns
-------
data : 2D np.ndarray
Validated 2D WFS image
"""
hdr = None
if isinstance(data, (str, pathlib.PosixPath)):
# we're a fits file (hopefully)
try:
with fits.open(data, ignore_missing_simple=True) as h:
data = h[-1].data # binospec images put the image data into separate extension so always grab last available.
if header:
hdr = h[-1].header
except Exception as e:
msg = "Error reading FITS file, %s (%s)" % (data, repr(e))
raise WFSConfigException(value=msg)
if not isinstance(data, np.ndarray):
msg = "WFS image data in improper format, %s" % type(data)
raise WFSConfigException(value=msg)
if len(data.shape) != 2:
msg = "WFS image data has improper shape, %dD. Must be 2D image." % len(data.shape)
raise WFSConfigException(value=msg)
if header and hdr is not None:
return data, hdr
else:
return data
def mk_wfs_mask(data, thresh_factor=50., outfile="wfs_mask.fits"):
"""
Take a WFS image and mask/scale it so that it can be used as a reference for pupil centering
Parameters
----------
data : FITS filename or 2D ndarray
WFS image
thresh_factor : float (default: 50.)
Fraction of maximum value below which will be masked to 0.
outfile : string (default: wfs_mask.fits)
Output FITS file to write the resulting image to.
Returns
-------
scaled : 2D ndarray
Scaled and masked WFS image
"""
data = check_wfsdata(data)
mx = data.max()
thresh = mx / thresh_factor
data[data < thresh] = 0.
scaled = data / mx
if outfile is not None:
fits.writeto(outfile, scaled)
return scaled
def wfsfind(data, fwhm=7.0, threshold=5.0, plot=True, ap_radius=5.0, std=None):
"""
Use photutils.DAOStarFinder() to find and centroid spots in a Shack-Hartmann WFS image.
Parameters
----------
data : FITS filename or 2D ndarray
WFS image
fwhm : float (default: 5.)
FWHM in pixels of DAOfind convolution kernel
threshold : float
DAOfind threshold in units of the standard deviation of the image
plot: bool
Toggle plotting of the reference image and overlayed apertures
ap_radius : float
Radius of plotted apertures
"""
# data should be background subtracted first...
data = check_wfsdata(data)
if std is None:
mean, median, std = stats.sigma_clipped_stats(data, sigma=3.0, maxiters=5)
daofind = photutils.DAOStarFinder(fwhm=fwhm, threshold=threshold*std, sharphi=0.95)
sources = daofind(data)
if sources is None:
msg = "WFS spot detection failed or no spots detected."
raise WFSAnalysisFailed(value=msg)
# this may be redundant given the above check...
nsrcs = len(sources)
if nsrcs == 0:
msg = "No WFS spots detected."
raise WFSAnalysisFailed(value=msg)
# only keep spots more than 1/4 as bright as the max. need this for f/9 especially.
sources = sources[sources['flux'] > sources['flux'].max()/4.]
fig = None
if plot:
fig, ax = plt.subplots()
fig.set_label("WFSfind")
positions = list(zip(sources['xcentroid'], sources['ycentroid']))
apertures = photutils.CircularAperture(positions, r=ap_radius)
norm = wfs_norm(data)
ax.imshow(data, cmap='Greys', origin='lower', norm=norm, interpolation='None')
apertures.plot(color='red', lw=1.5, alpha=0.5, axes=ax)
return sources, fig
def grid_spacing(data, apertures):
"""
Measure the WFS grid spacing which changes with telescope focus.
Parameters
----------
data : WFS image (FITS or np.ndarray)
apertures : `~astropy.table.Table`
WFS aperture data to analyze
Returns
-------
xspacing, yspacing : float, float
Average grid spacing in X and Y axes
"""
data = check_wfsdata(data)
x = np.arange(data.shape[1])
y = np.arange(data.shape[0])
bx = np.arange(data.shape[1]+1)
by = np.arange(data.shape[0]+1)
# bin the spot positions along the axes and use Lomb-Scargle to measure the grid spacing in each direction
xsum = np.histogram(apertures['xcentroid'], bins=bx)
ysum = np.histogram(apertures['ycentroid'], bins=by)
k = np.linspace(10.0, 50., 1000) # look for spacings from 10 to 50 pixels (plenty of range, but not too small to alias)
f = 1.0 / k # convert spacing to frequency
xp = timeseries.LombScargle(x, xsum[0]).power(f)
yp = timeseries.LombScargle(y, ysum[0]).power(f)
# the peak of the power spectrum will coincide with the average spacing
xspacing = k[xp.argmax()]
yspacing = k[yp.argmax()]
return xspacing, yspacing
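# A minimal usage sketch, with `apertures` being the table returned by
# get_apertures() (the periodogram above peaks at frequency 1/spacing, so the
# returned values are the average lenslet spacings in pixels):
#   xsp, ysp = grid_spacing(data, apertures)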
def center_pupil(input_data, pup_mask, threshold=0.8, sigma=10., plot=True):
"""
Find the center of the pupil in a WFS image using skimage.feature.match_template(). This generates
a correlation image and we centroid the peak of the correlation to determine the center.
Parameters
----------
data : str or 2D ndarray
WFS image to analyze, either FITS file or ndarray image data
pup_mask : str or 2D ndarray
Pupil model to use in the template matching
    threshold : float (default: 0.8)
        Fraction of the correlation maximum used as the peak-detection threshold
    sigma : float (default: 10.)
        Sigma of gaussian smoothing kernel
plot : bool
Toggle plotting of the correlation image
Returns
-------
cen : tuple (float, float)
X and Y pixel coordinates of the pupil center
"""
data = np.copy(check_wfsdata(input_data))
pup_mask = check_wfsdata(pup_mask).astype(np.float64) # need to force float64 here to make scipy >= 1.4 happy...
# smooth the image to increae the S/N.
smo = ndimage.gaussian_filter(data, sigma)
# use skimage.feature.match_template() to do a fast cross-correlation between the WFS image and the pupil model.
# the location of the peak of the correlation will be the center of the WFS pattern.
match = feature.match_template(smo, pup_mask, pad_input=True)
find_thresh = threshold * match.max()
t = photutils.detection.find_peaks(match, find_thresh, box_size=5, centroid_func=photutils.centroids.centroid_com)
if t is None:
msg = "No valid pupil or spot pattern detected."
raise WFSAnalysisFailed(value=msg)
peak = t['peak_value'].max()
xps = []
yps = []
# if there are peaks that are very nearly correlated, average their positions
for p in t:
if p['peak_value'] >= 0.95*peak:
xps.append(p['x_centroid'])
yps.append(p['y_centroid'])
xp = np.mean(xps)
yp = np.mean(yps)
fig = None
if plot:
fig, ax = plt.subplots()
fig.set_label("Pupil Correlation Image (masked)")
ax.imshow(match, interpolation=None, cmap=cm.magma, origin='lower')
ax.scatter(xp, yp, marker="+", color="green")
return xp, yp, fig
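# A minimal usage sketch (hypothetical file names); the pupil model could be one
# produced by mk_wfs_mask():
#   xcen, ycen, fig = center_pupil("wfs_image.fits", "pup_mask.fits", plot=False)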
def get_apertures(data, apsize, fwhm=5.0, thresh=7.0, plot=True, cen=None):
"""
Use wfsfind to locate and centroid spots. Measure their S/N ratios and the sigma of a 2D gaussian fit to
the co-added spot.
Parameters
----------
data : str or 2D ndarray
WFS image to analyze, either FITS file or ndarray image data
apsize : float
Diameter/width of the SH apertures
Returns
-------
srcs : astropy.table.Table
Detected WFS spot positions and properties
masks : list of photutils.ApertureMask objects
Masks used for aperture centroiding
snrs : 1D np.ndarray
S/N for each located spot
    sigma : float
        Sigma of a 2D Gaussian fit to the co-added spot image
    wfsfind_fig : matplotlib.figure.Figure
        Figure from wfsfind() showing the detected spots (None if plot=False)
    """
data = check_wfsdata(data)
# set maxiters to None to let this clip all the way to convergence
if cen is None:
mean, median, stddev = stats.sigma_clipped_stats(data, sigma=3.0, maxiters=None)
else:
xcen, ycen = int(cen[0]), int(cen[1])
        mean, median, stddev = stats.sigma_clipped_stats(data[ycen-50:ycen+50, xcen-50:xcen+50], sigma=3.0, maxiters=None)
# use wfsfind() and pass it the clipped stddev from here
with warnings.catch_warnings():
warnings.simplefilter("ignore")
srcs, wfsfind_fig = wfsfind(data, fwhm=fwhm, threshold=thresh, std=stddev, plot=plot)
# we use circular apertures here because they generate square masks of the appropriate size.
# rectangular apertures produced masks that were sqrt(2) too large.
# see https://github.com/astropy/photutils/issues/499 for details.
apers = photutils.CircularAperture(
list(zip(srcs['xcentroid'], srcs['ycentroid'])),
r=apsize/2.
)
masks = apers.to_mask(method='subpixel')
sigma = 0.0
snrs = []
if len(masks) >= 1:
spot = np.zeros(masks[0].shape)
for m in masks:
subim = m.cutout(data)
# make co-added spot image for use in calculating the seeing
if subim.shape == spot.shape:
spot += subim
signal = subim.sum()
noise = np.sqrt(stddev**2 * subim.shape[0] * subim.shape[1])
snr = signal / noise
snrs.append(snr)
snrs = np.array(snrs)
# set up 2D gaussian model plus constant background to fit to the coadded spot
with warnings.catch_warnings():
# ignore astropy warnings about issues with the fit...
warnings.simplefilter("ignore")
g2d = Gaussian2D(amplitude=spot.max(), x_mean=spot.shape[1]/2, y_mean=spot.shape[0]/2)
p2d = Polynomial2D(degree=0)
model = g2d + p2d
fitter = LevMarLSQFitter()
y, x = np.mgrid[:spot.shape[0], :spot.shape[1]]
fit = fitter(model, x, y, spot)
sigma = 0.5 * (fit.x_stddev_0.value + fit.y_stddev_0.value)
return srcs, masks, snrs, sigma, wfsfind_fig
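# Hypothetical call pattern for get_apertures(); apsize would normally be the
# reference grid spacing measured from the SH reference image:
#
#   srcs, masks, snrs, sigma, fig = get_apertures(data, apsize=20., fwhm=5.,
#                                                 thresh=7., cen=(255, 255))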
def match_apertures(refx, refy, spotx, spoty, max_dist=25.):
"""
Given reference aperture and spot X/Y positions, loop through reference apertures and find closest spot. Use
max_dist to exclude matches that are too far from reference position. Return masks to use to denote validly
matched apertures.
"""
refs = np.array([refx, refy])
spots = np.array([spotx, spoty])
match = np.nan * np.ones(len(refx))
matched = []
for i in np.arange(len(refx)):
dists = np.sqrt((spots[0]-refs[0][i])**2 + (spots[1]-refs[1][i])**2)
min_i = np.argmin(dists)
if np.min(dists) < max_dist:
if min_i not in matched:
match[i] = min_i
matched.append(min_i)
else:
if min_i not in matched:
match[i] = np.nan
ref_mask = ~np.isnan(match)
src_mask = match[ref_mask]
return ref_mask, src_mask.astype(int)
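# Toy example of the matching above (hypothetical numbers): reference apertures
# at x=(0, 10), y=(0, 0) and spots at x=(0.5, 30), y=(0, 0) with max_dist=5
# match only the first aperture, giving ref_mask=[True, False] and src_mask=[0].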
def aperture_distance(refx, refy, spotx, spoty):
"""
Calculate the sum of the distances between each reference aperture and the closest measured spot position.
This total distance is the statistic to minimize when fitting the reference aperture grid to the data.
"""
refs = np.array([refx, refy]).transpose()
spots = np.array([spotx, spoty]).transpose()
tree = cKDTree(refs)
mindist, _ = tree.query(spots)
tot_dist = mindist.sum()
return np.log(tot_dist)
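# Sketch of the distance statistic with made-up positions:
#
#   from scipy.spatial import cKDTree
#   refs = np.array([[0., 0.], [10., 0.]])
#   spots = np.array([[0.2, 0.1], [9.7, 0.4]])
#   mindist, _ = cKDTree(refs).query(spots)   # distance to nearest reference
#   stat = np.log(mindist.sum())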
def fit_apertures(pars, ref, spots):
"""
Scale the reference positions by the fit parameters and calculate the total distance between the matches.
The parameters of the fit are:
``xc, yc = center positions``
``scale = magnification of the grid (focus)``
``xcoma, ycoma = linear change in magnification as a function of x/y (coma)``
'ref' and 'spots' are assumed to be dict-like and must have the keys 'xcentroid' and 'ycentroid'.
Parameters
----------
pars : list-like
The fit parameters passed in as a 5 element list: (xc, yc, scale, xcoma, ycoma)
ref : dict-like
Dict containing ``xcentroid`` and ``ycentroid`` keys that contain the reference X and Y
positions of the apertures.
spots : dict-like
Dict containing ``xcentroid`` and ``ycentroid`` keys that contain the measured X and Y
positions of the apertures.
Returns
-------
dist : float
The cumulative distance between the matched reference and measured aperture positions.
"""
xc = pars[0]
yc = pars[1]
scale = pars[2]
xcoma = pars[3]
ycoma = pars[4]
refx = ref['xcentroid'] * (scale + ref['xcentroid'] * xcoma) + xc
refy = ref['ycentroid'] * (scale + ref['ycentroid'] * ycoma) + yc
spotx = spots['xcentroid']
spoty = spots['ycentroid']
dist = aperture_distance(refx, refy, spotx, spoty)
return dist
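# The distortion model applied to the reference grid above is, per axis,
#     x' = x * (scale + x * xcoma) + xc
#     y' = y * (scale + y * ycoma) + yc
# i.e. a common magnification (focus) plus a linear gradient of the
# magnification across the field (coma), followed by a shift of the grid center.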
def get_slopes(data, ref, pup_mask, fwhm=7., thresh=5., cen=[255, 255],
cen_thresh=0.8, cen_sigma=10., cen_tol=50., spot_snr_thresh=3.0, plot=True):
"""
Analyze a WFS image and produce pixel offsets between reference and observed spot positions.
Parameters
----------
data : str or 2D np.ndarray
FITS file or np.ndarray containing WFS observation
ref : `~astropy.table.Table`
Table of reference apertures
pup_mask : str or 2D np.ndarray
FITS file or np.ndarray containing mask used to register WFS spot pattern via cross-correlation
fwhm : float (default: 7.0)
FWHM of convolution kernel applied to image by the spot finding algorithm
thresh : float (default: 5.0)
Number of sigma above background for a spot to be considered detected
cen : list-like with 2 elements (default: [255, 255])
Expected position of the center of the WFS spot pattern in form [X_cen, Y_cen]
cen_thresh : float (default: 0.8)
Masking threshold as fraction of peak value used in `~photutils.detection.find_peaks`
cen_sigma : float (default: 10.0)
Width of gaussian filter applied to image by `~mmtwfs.wfs.center_pupil`
cen_tol : float (default: 50.0)
Tolerance for difference between expected and measured pupil center
spot_snr_thresh : float (default: 3.0)
S/N tolerance for a WFS spot to be considered valid for analysis
plot : bool
Toggle plotting of image with aperture overlays
Returns
-------
results : dict
Results of the wavefront slopes measurement packaged into a dict with the following keys:
slopes - masked np.ndarray containing the slope values in pixel units
pup_coords - pupil coordinates for the position for each slope value
spots - `~astropy.table.Table` as returned by photutils star finder routines
src_aps - `~photutils.aperture.CircularAperture` for each detected spot
spacing - list-like of form (xspacing, yspacing) containing the mean spacing between rows and columns of spots
center - list-like of form (xcen, ycen) containing the center of the spot pattern
ref_mask - np.ndarray of matched spots in reference image
src_mask - np.ndarray of matched spots in the data image
spot_sigma - sigma of a gaussian fit to a co-addition of detected spots
figures - dict of figures that are optionally produced
grid_fit - dict of best-fit parameters of grid fit used to do fine registration between source and reference spots
"""
data = check_wfsdata(data)
pup_mask = check_wfsdata(pup_mask)
if ref.pup_outer is None:
raise WFSConfigException("No pupil information applied to SH reference.")
pup_outer = ref.pup_outer
pup_inner = ref.pup_inner
# input data should be background subtracted for best results. this initial guess of the center positions
# will be good enough to get the central obscuration, but will need to be fine-tuned for aperture association.
xcen, ycen, pupcen_fig = center_pupil(data, pup_mask, threshold=cen_thresh, sigma=cen_sigma, plot=plot)
if np.hypot(xcen-cen[0], ycen-cen[1]) > cen_tol:
msg = f"Measured pupil center [{round(xcen)}, {round(ycen)}] more than {cen_tol} pixels from {cen}."
raise WFSAnalysisFailed(value=msg)
# using the mean spacing is straightforward for square apertures and a reasonable underestimate for hexagonal ones
ref_spacing = np.mean([ref.xspacing, ref.yspacing])
apsize = ref_spacing
srcs, masks, snrs, sigma, wfsfind_fig = get_apertures(data, apsize, fwhm=fwhm, thresh=thresh, cen=(xcen, ycen))
# ignore low S/N spots
srcs = srcs[snrs > spot_snr_thresh]
# get grid spacing of the data
xspacing, yspacing = grid_spacing(data, srcs)
# find the scale difference between data and ref and use as init
init_scale = (xspacing/ref.xspacing + yspacing/ref.yspacing) / 2.
# apply masking to detected sources to avoid partially illuminated apertures at the edges
srcs['dist'] = np.sqrt((srcs['xcentroid'] - xcen)**2 + (srcs['ycentroid'] - ycen)**2)
srcs = srcs[(srcs['dist'] > pup_inner*init_scale) & (srcs['dist'] < pup_outer*init_scale)]
# if we don't detect spots in at least half of the reference apertures, we can't usually get a good wavefront measurement
if len(srcs) < 0.5 * len(ref.masked_apertures['xcentroid']):
msg = "Only %d spots detected out of %d apertures." % (len(srcs), len(ref.masked_apertures['xcentroid']))
raise WFSAnalysisFailed(value=msg)
src_aps = photutils.CircularAperture(
list(zip(srcs['xcentroid'], srcs['ycentroid'])),
r=apsize/2.
)
# set up to do a fit of the reference apertures to the spot positions with the center, scaling, and position-dependent
# scaling (coma) as free parameters
args = (ref.masked_apertures, srcs)
par_keys = ('xcen', 'ycen', 'scale', 'xcoma', 'ycoma')
pars = (xcen, ycen, init_scale, 0.0, 0.0)
coma_bound = 1e-4 # keep coma tightly constrained for now since it can cause trouble
# scipy.optimize.minimize can do bounded minimization so leverage that to keep the solution within a reasonable range.
bounds = (
(xcen-15, xcen+15), # hopefully we're not too far off from true center...
(ycen-15, ycen+15),
(init_scale-0.05, init_scale+0.05), # reasonable range of expected focus difference...
(-coma_bound, coma_bound),
(-coma_bound, coma_bound)
)
try:
min_results = optimize.minimize(fit_apertures, pars, args=args, bounds=bounds, options={'ftol': 1e-13, 'gtol': 1e-7})
except Exception as e:
msg = f"Aperture grid matching failed: {e}"
raise WFSAnalysisFailed(value=msg)
fit_results = {}
for i, k in enumerate(par_keys):
fit_results[k] = min_results['x'][i]
# this is more reliably the center of the actual pupil image whereas fit_results shifts a bit depending on detected spots.
# the lenslet pattern can move around a bit on the pupil, but we need the center of the pupil to calculate their pupil
# coordinates.
pup_center = [xcen, ycen]
scale = fit_results['scale']
xcoma, ycoma = fit_results['xcoma'], fit_results['ycoma']
refx = ref.masked_apertures['xcentroid'] * (scale + ref.masked_apertures['xcentroid'] * xcoma) + fit_results['xcen']
refy = ref.masked_apertures['ycentroid'] * (scale + ref.masked_apertures['ycentroid'] * ycoma) + fit_results['ycen']
xspacing = scale * ref.xspacing
yspacing = scale * ref.yspacing
# coarse match reference apertures to spots
spacing = np.max([xspacing, yspacing])
ref_mask, src_mask = match_apertures(refx, refy, srcs['xcentroid'], srcs['ycentroid'], max_dist=spacing/2.)
# these are unscaled so that the slope includes defocus
trim_refx = ref.masked_apertures['xcentroid'][ref_mask] + fit_results['xcen']
trim_refy = ref.masked_apertures['ycentroid'][ref_mask] + fit_results['ycen']
ref_aps = photutils.CircularAperture(
list(zip(trim_refx, trim_refy)),
r=ref_spacing/2.
)
slope_x = srcs['xcentroid'][src_mask] - trim_refx
slope_y = srcs['ycentroid'][src_mask] - trim_refy
pup_coords = (ref_aps.positions - pup_center) / [pup_outer, pup_outer]
aps_fig = None
if plot:
norm = wfs_norm(data)
aps_fig, ax = plt.subplots()
aps_fig.set_label("Aperture Positions")
ax.imshow(data, cmap='Greys', origin='lower', norm=norm, interpolation='None')
ax.scatter(pup_center[0], pup_center[1])
src_aps.plot(color='blue', axes=ax)
# need full slopes array the size of the complete set of reference apertures and pre-filled with np.nan for masking
slopes = np.nan * np.ones((2, len(ref.masked_apertures['xcentroid'])))
slopes[0][ref_mask] = slope_x
slopes[1][ref_mask] = slope_y
figures = {}
figures['pupil_center'] = pupcen_fig
figures['slopes'] = aps_fig
results = {
"slopes": np.ma.masked_invalid(slopes),
"pup_coords": pup_coords.transpose(),
"spots": srcs,
"src_aps": src_aps,
"spacing": (xspacing, yspacing),
"center": pup_center,
"ref_mask": ref_mask,
"src_mask": src_mask,
"spot_sigma": sigma,
"figures": figures,
"grid_fit": fit_results
}
return results
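# Hypothetical call pattern for get_slopes(); inputs are normally prepared by
# WFS.measure_slopes(), and the file names here are placeholders:
#
#   results = get_slopes("wfs_image.fits", ref, "pupil_mask.fits",
#                        fwhm=7., thresh=5., cen=[255, 255], plot=False)
#   slopes = results['slopes']       # masked (2, N_aperture) array in pixels
#   xcen, ycen = results['center']   # measured pupil center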
def make_init_pars(nmodes=21, modestart=2, init_zv=None):
"""
Make a set of initial parameters that can be used with `~lmfit.minimize` to make a wavefront fit with
parameter names that are compatible with ZernikeVectors.
Parameters
----------
nmodes: int (default: 21)
Number of Zernike modes to fit.
modestart: int (default: 2)
First Zernike mode to be used.
init_zv: ZernikeVector (default: None)
ZernikeVector containing initial values for the fit.
Returns
-------
params: `~lmfit.Parameters` instance
Initial parameters in form that can be passed to `~lmfit.minimize`.
"""
pars = []
for i in range(modestart, modestart+nmodes, 1):
key = "Z{:02d}".format(i)
if init_zv is not None:
val = init_zv[key].value
if val < 2. * np.finfo(float).eps:
val = 0.0
else:
val = 0.0
zpar = (key, val)
pars.append(zpar)
params = lmfit.Parameters()
params.add_many(*pars)
return params
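# Example (hypothetical values): make_init_pars(nmodes=3, modestart=2) returns
# an lmfit.Parameters object with entries named 'Z02', 'Z03', and 'Z04', each
# initialized to 0.0 unless an init_zv ZernikeVector supplies starting values.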
def slope_diff(pars, coords, slopes, norm=False):
"""
For a given set of wavefront fit parameters, calculate the "distance" between the predicted and measured wavefront
slopes. This function is used by `~lmfit.minimize`, which expects the per-point residuals (it squares and sums them internally) rather than a pre-computed chi-squared.
"""
parsdict = pars.valuesdict()
rho, phi = cart2pol(coords)
xslope = slopes[0]
yslope = slopes[1]
pred_xslope, pred_yslope = zernike_slopes(parsdict, rho, phi, norm=norm)
dist = np.sqrt((xslope - pred_xslope)**2 + (yslope - pred_yslope)**2)
return dist
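# For each aperture the residual returned above is
#     d_i = sqrt((sx_i - sx_pred_i)**2 + (sy_i - sy_pred_i)**2),
# which lmfit.minimize squares and sums internally, so the quantity actually
# minimized is the usual chi-squared of the slope fit.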
class SH_Reference(object):
"""
Class to handle Shack-Hartmann reference data
"""
def __init__(self, data, fwhm=4.5, threshold=20.0, plot=True):
"""
Read WFS reference image and generate reference magnifications (i.e. grid spacing) and
aperture positions.
Parameters
----------
data : FITS filename or 2D ndarray
WFS reference image
fwhm : float
FWHM in pixels of DAOfind convolution kernel
threshold : float
DAOfind threshold in units of the standard deviation of the image
plot : bool
Toggle plotting of the reference image and overlaid apertures
"""
self.data = check_wfsdata(data)
data = self.data - np.median(self.data)  # use the validated array so a FITS filename input also works
self.apertures, self.figure = wfsfind(data, fwhm=fwhm, threshold=threshold, plot=plot)
if plot:
self.figure.set_label("Reference Image")
self.xcen = self.apertures['xcentroid'].mean()
self.ycen = self.apertures['ycentroid'].mean()
self.xspacing, self.yspacing = grid_spacing(data, self.apertures)
# make masks for each reference spot and fit a 2D gaussian to get its FWHM. the reference FWHM is subtracted in
# quadrature from the observed FWHM when calculating the seeing.
apsize = np.mean([self.xspacing, self.yspacing])
apers = photutils.CircularAperture(
list(zip(self.apertures['xcentroid'], self.apertures['ycentroid'])),
r=apsize/2.
)
masks = apers.to_mask(method='subpixel')
self.photapers = apers
self.spot = np.zeros(masks[0].shape)
for m in masks:
subim = m.cutout(data)
# make co-added spot image for use in calculating the seeing
if subim.shape == self.spot.shape:
self.spot += subim
self.apertures['xcentroid'] -= self.xcen
self.apertures['ycentroid'] -= self.ycen
self.apertures['dist'] = np.sqrt(self.apertures['xcentroid']**2 + self.apertures['ycentroid']**2)
self.masked_apertures = self.apertures
self.pup_inner = None
self.pup_outer = None
def adjust_center(self, x, y):
"""
Adjust reference center to new x, y position.
"""
self.apertures['xcentroid'] += self.xcen
self.apertures['ycentroid'] += self.ycen
self.apertures['xcentroid'] -= x
self.apertures['ycentroid'] -= y
self.apertures['dist'] = np.sqrt(self.apertures['xcentroid']**2 + self.apertures['ycentroid']**2)
self.xcen = x
self.ycen = y
self.apply_pupil(self.pup_inner, self.pup_outer)
def apply_pupil(self, pup_inner, pup_outer):
"""
Apply a pupil mask to the reference apertures
"""
if pup_inner is not None and pup_outer is not None:
self.masked_apertures = self.apertures[(self.apertures['dist'] > pup_inner) & (self.apertures['dist'] < pup_outer)]
self.pup_inner = pup_inner
self.pup_outer = pup_outer
def pup_coords(self, pup_outer):
"""
Take outer radius of pupil and calculate pupil coordinates for the masked apertures
"""
coords = (self.masked_apertures['xcentroid']/pup_outer, self.masked_apertures['ycentroid']/pup_outer)
return coords
def WFSFactory(wfs="f5", config={}, **kwargs):
"""
Build and return proper WFS sub-class instance based on the value of 'wfs'.
"""
config = merge_config(config, dict(**kwargs))
wfs = wfs.lower()
types = recursive_subclasses(WFS)
wfses = [t.__name__.lower() for t in types]
wfs_map = dict(list(zip(wfses, types)))
if wfs not in wfses:
raise WFSConfigException(value="Specified WFS, %s, not valid or not implemented." % wfs)
if 'plot' in config:
plot = config['plot']
else:
plot = True
wfs_cls = wfs_map[wfs](config=config, plot=plot)
return wfs_cls
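# Typical factory usage (the mode string and keyword are illustrative; valid
# names are the lower-cased WFS subclass names discovered above):
#
#   wfs = WFSFactory(wfs="f9", plot=False)
#   results = wfs.measure_slopes("some_wfs_exposure.fits")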
class WFS(object):
"""
Defines configuration pattern and methods common to all WFS systems
"""
def __init__(self, config={}, plot=True, **kwargs):
key = self.__class__.__name__.lower()
self.__dict__.update(merge_config(mmtwfs_config['wfs'][key], config))
self.telescope = TelescopeFactory(telescope=self.telescope, secondary=self.secondary)
self.secondary = self.telescope.secondary
self.plot = plot
self.connected = False
self.ref_fwhm = self.ref_spot_fwhm()
# this factor calibrates spot motion in pixels to nm of wavefront error
self.tiltfactor = self.telescope.nmperasec * (self.pix_size.to(u.arcsec).value)
# if this is the same for all modes, load it once here
if hasattr(self, "reference_file"):
refdata, hdr = check_wfsdata(self.reference_file, header=True)
refdata = self.trim_overscan(refdata, hdr)
reference = SH_Reference(refdata, plot=self.plot)
# now assign 'reference' for each mode so that it can be accessed consistently in all cases
for mode in self.modes:
if 'reference_file' in self.modes[mode]:
refdata, hdr = check_wfsdata(self.modes[mode]['reference_file'], header=True)
refdata = self.trim_overscan(refdata, hdr)
self.modes[mode]['reference'] = SH_Reference(
refdata,
plot=self.plot
)
else:
self.modes[mode]['reference'] = reference
def ref_spot_fwhm(self):
"""
Calculate the Airy FWHM in pixels of a perfect WFS spot from the optical prescription and detector pixel size
"""
theta_fwhm = 1.028 * self.eff_wave / self.lenslet_pitch
det_fwhm = np.arctan(theta_fwhm).value * self.lenslet_fl
det_fwhm_pix = det_fwhm.to(u.um).value / self.pix_um.to(u.um).value
return det_fwhm_pix
def get_flipud(self, mode=None):
"""
Determine if the WFS image needs to be flipped up/down
"""
return False
def get_fliplr(self, mode=None):
"""
Determine if the WFS image needs to be flipped left/right
"""
return False
def ref_pupil_location(self, mode, hdr=None):
"""
Get the center of the pupil on the reference image
"""
ref = self.modes[mode]['reference']
x = ref.xcen
y = ref.ycen
return x, y
def seeing(self, mode, sigma, airmass=None):
"""
Given a sigma derived from a gaussian fit to a WFS spot, deconvolve the systematic width from the reference image
and relate the remainder to r_0 and thus a seeing FWHM.
"""
# the effective wavelength of the WFS imagers is about 600-700 nm. mmirs and the oldf9 system use blue-blocking filters
wave = self.eff_wave
wave = wave.to(u.m).value # r_0 equation expects meters so convert
refwave = 500 * u.nm # standard wavelength that seeing values are referenced to
refwave = refwave.to(u.m).value
# calculate the physical size of each aperture.
ref = self.modes[mode]['reference']
apsize_pix = np.max((ref.xspacing, ref.yspacing))
d = self.telescope.diameter * apsize_pix / self.pup_size
d = d.to(u.m).value # r_0 equation expects meters so convert
# we need to deconvolve the instrumental spot width from the measured one to get the portion of the width that
# is due to spot motion
ref_sigma = stats.funcs.gaussian_fwhm_to_sigma * self.ref_fwhm
if sigma > ref_sigma:
corr_sigma = np.sqrt(sigma**2 - ref_sigma**2)
else:
return 0.0 * u.arcsec, 0.0 * u.arcsec
corr_sigma *= self.pix_size.to(u.rad).value # r_0 equation expects radians so convert
# this equation relates the motion within a single aperture to the characteristic scale size of the
# turbulence, r_0.
r_0 = (0.179 * (wave**2) * (d**(-1/3))/corr_sigma**2)**0.6
# this equation relates the turbulence scale size to an expected image FWHM at the given wavelength.
raw_seeing = u.Quantity(u.rad * 0.98 * wave / r_0, u.arcsec)
# seeing scales as lambda^-1/5 so calculate factor to scale to reference lambda
wave_corr = refwave**-0.2 / wave**-0.2
raw_seeing *= wave_corr
# correct seeing to zenith
if airmass is not None:
seeing = raw_seeing / airmass**0.6
else:
seeing = raw_seeing
return seeing, raw_seeing
def pupil_mask(self, hdr=None):
"""
Load and return the WFS spot mask used to locate and register the pupil
"""
pup_mask = check_wfsdata(self.wfs_mask)
return pup_mask
def reference_aberrations(self, mode, **kwargs):
"""
Create reference ZernikeVector for 'mode'.
"""
z = ZernikeVector(**self.modes[mode]['ref_zern'])
return z
def get_mode(self, hdr):
"""
If mode is not specified, either set it to the default mode or figure out the mode from the header.
"""
mode = self.default_mode
return mode
def process_image(self, fitsfile):
"""
Process the image to make it suitable for accurate wavefront analysis. Steps include nuking cosmic rays,
subtracting background, handling overscan regions, etc.
"""
rawdata, hdr = check_wfsdata(fitsfile, header=True)
trimdata = self.trim_overscan(rawdata, hdr=hdr)
# MMIRS gets a lot of hot pixels/CRs so make a quick pass to nuke them
cr_mask, data = detect_cosmics(trimdata, sigclip=5., niter=5, cleantype='medmask', psffwhm=5.)
# calculate the background and subtract it
bkg_estimator = photutils.ModeEstimatorBackground()
mask = photutils.make_source_mask(data, nsigma=2, npixels=5, dilate_size=11)
bkg = photutils.Background2D(data, (10, 10), filter_size=(5, 5), bkg_estimator=bkg_estimator, mask=mask)
data -= bkg.background
return data, hdr
def trim_overscan(self, data, hdr=None):
"""
Use the DATASEC keyword in the header to determine the good data region to keep, trimming off the overscan. If no
header is provided or the header doesn't contain DATASEC, return the data unchanged.
"""
if hdr is None:
return data
if 'DATASEC' not in hdr:
# if no DATASEC in header, punt and return unchanged
return data
datasec = slice_from_string(hdr['DATASEC'], fits_convention=True)
return data[datasec]
def measure_slopes(self, fitsfile, mode=None, plot=True, flipud=False, fliplr=False):
"""
Take a WFS image in FITS format, perform background subtraction, pupil centration, and then use get_slopes()
to perform the aperture placement and spot centroiding.
"""
data, hdr = self.process_image(fitsfile)
plot = plot and self.plot
# flip data up/down if we need to. only binospec needs to currently.
if flipud or self.get_flipud(mode=mode):
data = np.flipud(data)
# flip left/right if we need to. no mode currently does, but who knows what the future holds.
if fliplr or self.get_fliplr(mode=mode):
data = np.fliplr(data)
if mode is None:
mode = self.get_mode(hdr)
if mode not in self.modes:
msg = "Invalid mode, %s, for WFS system, %s." % (mode, self.__class__.__name__)
raise WFSConfigException(value=msg)
# if available, get the rotator angle out of the header
if 'ROT' in hdr:
rotator = hdr['ROT'] * u.deg
else:
rotator = 0.0 * u.deg
# if there's a ROTOFF in the image header, grab it and adjust the rotator angle accordingly
if 'ROTOFF' in hdr:
rotator -= hdr['ROTOFF'] * u.deg
# make mask for finding wfs spot pattern
pup_mask = self.pupil_mask(hdr=hdr)
# get adjusted reference center position and update the reference
xcen, ycen = self.ref_pupil_location(mode, hdr=hdr)
self.modes[mode]['reference'].adjust_center(xcen, ycen)
# apply pupil to the reference
self.modes[mode]['reference'].apply_pupil(self.pup_inner, self.pup_size/2.)
ref_zv = self.reference_aberrations(mode, hdr=hdr)
zref = ref_zv.array
if len(zref) < self.nzern:
pad = np.zeros(self.nzern - len(zref))
zref = np.hstack((zref, pad))
try:
slope_results = get_slopes(
data,
self.modes[mode]['reference'],
pup_mask,
fwhm=self.find_fwhm,
thresh=self.find_thresh,
cen=self.cor_coords,
cen_thresh=self.cen_thresh,
cen_sigma=self.cen_sigma,
cen_tol=self.cen_tol,
plot=plot
)
slopes = slope_results['slopes']
coords = slope_results['pup_coords']
ref_pup_coords = self.modes[mode]['reference'].pup_coords(self.pup_size/2.)
rho, phi = cart2pol(ref_pup_coords)
ref_slopes = -(1. / self.tiltfactor) * np.array(zernike_slopes(ref_zv, rho, phi))
aps = slope_results['src_aps']
ref_mask = slope_results['ref_mask']
src_mask = slope_results['src_mask']
figures = slope_results['figures']
except WFSAnalysisFailed as e:
log.warning(f"Wavefront slope measurement failed: {e}")
slope_fig = None
if plot:
slope_fig, ax = plt.subplots()
slope_fig.set_label("WFS Image")
norm = wfs_norm(data)
ax.imshow(data, cmap='Greys', origin='lower', norm=norm, interpolation='None')
results = {}
results['slopes'] = None
results['figures'] = {}
results['mode'] = mode
results['figures']['slopes'] = slope_fig
return results
except Exception as e:
raise WFSAnalysisFailed(value=str(e))
# use the average width of the spots to estimate the seeing and use the airmass to extrapolate to zenith seeing
if 'AIRMASS' in hdr:
airmass = hdr['AIRMASS']
else:
airmass = None
seeing, raw_seeing = self.seeing(mode=mode, sigma=slope_results['spot_sigma'], airmass=airmass)
if plot:
sub_slopes = slopes - ref_slopes
x = aps.positions.transpose()[0][src_mask]
y = aps.positions.transpose()[1][src_mask]
uu = sub_slopes[0][ref_mask]
vv = sub_slopes[1][ref_mask]
norm = wfs_norm(data)
figures['slopes'].set_label("Aperture Positions and Spot Movement")
ax = figures['slopes'].axes[0]
ax.imshow(data, cmap='Greys', origin='lower', norm=norm, interpolation='None')
aps.plot(color='blue', axes=ax)
ax.quiver(x, y, uu, vv, scale_units='xy', scale=0.2, pivot='tip', color='red')
xl = [0.1*data.shape[1]]
yl = [0.95*data.shape[0]]
ul = [1.0/self.pix_size.value]
vl = [0.0]
ax.quiver(xl, yl, ul, vl, scale_units='xy', scale=0.2, pivot='tip', color='red')
ax.scatter([slope_results['center'][0]], [slope_results['center'][1]])
ax.text(0.12*data.shape[1], 0.95*data.shape[0], "1{0:unicode}".format(u.arcsec), verticalalignment='center')
ax.set_title("Seeing: %.2f\" (%.2f\" @ zenith)" % (raw_seeing.value, seeing.value))
results = {}
results['seeing'] = seeing
results['raw_seeing'] = raw_seeing
results['slopes'] = slopes
results['ref_slopes'] = ref_slopes
results['ref_zv'] = ref_zv
results['spots'] = slope_results['spots']
results['pup_coords'] = coords
results['ref_pup_coords'] = ref_pup_coords
results['apertures'] = aps
results['xspacing'] = slope_results['spacing'][0]
results['yspacing'] = slope_results['spacing'][1]
results['xcen'] = slope_results['center'][0]
results['ycen'] = slope_results['center'][1]
results['pup_mask'] = pup_mask
results['data'] = data
results['header'] = hdr
results['rotator'] = rotator
results['mode'] = mode
results['ref_mask'] = ref_mask
results['src_mask'] = src_mask
results['fwhm'] = stats.funcs.gaussian_sigma_to_fwhm * slope_results['spot_sigma']
results['figures'] = figures
results['grid_fit'] = slope_results['grid_fit']
return results
def fit_wavefront(self, slope_results, plot=True):
"""
Use results from self.measure_slopes() to fit a set of zernike polynomials to the wavefront shape.
"""
plot = plot and self.plot
if slope_results['slopes'] is not None:
results = {}
slopes = -self.tiltfactor * slope_results['slopes']
coords = slope_results['ref_pup_coords']
rho, phi = cart2pol(coords)
zref = slope_results['ref_zv']
params = make_init_pars(nmodes=self.nzern, init_zv=zref)
results['fit_report'] = lmfit.minimize(slope_diff, params, args=(coords, slopes))
zfit = ZernikeVector(coeffs=results['fit_report'])
results['raw_zernike'] = zfit
# derotate the zernike solution to match the primary mirror coordinate system
total_rotation = self.rotation - slope_results['rotator']
zv_rot = ZernikeVector(coeffs=results['fit_report'])
zv_rot.rotate(angle=-total_rotation)
results['rot_zernike'] = zv_rot
# subtract the reference aberrations
zsub = zv_rot - zref
results['ref_zernike'] = zref
results['zernike'] = zsub
pred_slopes = np.array(zernike_slopes(zfit, rho, phi))
diff = slopes - pred_slopes
diff_pix = diff / self.tiltfactor
rms = np.sqrt((diff[0]**2 + diff[1]**2).mean())
results['residual_rms_asec'] = rms / self.telescope.nmperasec * u.arcsec
results['residual_rms'] = rms * zsub.units
results['zernike_rms'] = zsub.rms
results['zernike_p2v'] = zsub.peak2valley
fig = None
if plot:
ref_mask = slope_results['ref_mask']
src_mask = slope_results['src_mask']
im = slope_results['data']
gnorm = wfs_norm(im)
fig, ax = plt.subplots()
fig.set_label("Zernike Fit Residuals")
ax.imshow(im, cmap='Greys', origin='lower', norm=gnorm, interpolation='None')
x = slope_results['apertures'].positions.transpose()[0][src_mask]
y = slope_results['apertures'].positions.transpose()[1][src_mask]
ax.quiver(x, y, diff_pix[0][ref_mask], diff_pix[1][ref_mask], scale_units='xy',
scale=0.05, pivot='tip', color='red')
xl = [0.1*im.shape[1]]
yl = [0.95*im.shape[0]]
ul = [0.2/self.pix_size.value]
vl = [0.0]
ax.quiver(xl, yl, ul, vl, scale_units='xy', scale=0.05, pivot='tip', color='red')
ax.text(0.12*im.shape[1], 0.95*im.shape[0], "0.2{0:unicode}".format(u.arcsec), verticalalignment='center')
ax.text(
0.95*im.shape[1],
0.95*im.shape[0],
"Residual RMS: {0.value:0.2f}{0.unit:unicode}".format(results['residual_rms_asec']),
verticalalignment='center',
horizontalalignment='right'
)
iq = np.sqrt(results['residual_rms_asec']**2 +
(results['zernike_rms'].value / self.telescope.nmperasec * u.arcsec)**2)
ax.set_title("Image Quality: {0.value:0.2f}{0.unit:unicode}".format(iq))
results['resid_plot'] = fig
else:
results = None
return results
def calculate_primary(self, zv, threshold=0.0 * u.nm, mask=[]):
"""
Calculate force corrections to primary mirror and any required focus offsets. Use threshold to determine which
terms in 'zv' to use in the force calculations. Any terms with normalized amplitude less than threshold will
not be used in the force calculation. In addition, individual terms can be forced to be masked.
"""
zv.denormalize()
zv_masked = ZernikeVector()
zv_norm = zv.copy()
zv_norm.normalize()
log.debug(f"thresh: {threshold} mask {mask}")
for z in zv:
if abs(zv_norm[z]) >= threshold:
zv_masked[z] = zv[z]
log.debug(f"{z}: Good")
else:
log.debug(f"{z}: Bad")
zv_masked.denormalize() # need to ensure we're using fringe coeffs
log.debug(f"\nInput masked: {zv_masked}")
# use any available error bars to mask down to 1 sigma below amplitude or 0 if error bars are larger than amplitude.
for z in zv_masked:
frac_err = 1. - min(zv_masked.frac_error(key=z), 1.)
zv_masked[z] *= frac_err
log.debug(f"\nErrorbar masked: {zv_masked}")
forces, m1focus, zv_allmasked = self.telescope.calculate_primary_corrections(
zv=zv_masked,
mask=mask,
gain=self.m1_gain
)
log.debug(f"\nAll masked: {zv_allmasked}")
return forces, m1focus, zv_allmasked
def calculate_focus(self, zv):
"""
Convert Zernike defocus to um of secondary offset.
"""
z_denorm = zv.copy()
z_denorm.denormalize() # need to ensure we're using fringe coeffs
frac_err = 1. - min(z_denorm.frac_error(key='Z04'), 1.)
foc_corr = -self.m2_gain * frac_err * z_denorm['Z04'] / self.secondary.focus_trans
return foc_corr.round(2)
def calculate_cc(self, zv):
"""
Convert Zernike coma (Z07 and Z08) into arcsec of secondary center-of-curvature tilts.
"""
z_denorm = zv.copy()
z_denorm.denormalize() # need to ensure we're using fringe coeffs
# fix coma using tilts around the M2 center of curvature.
y_frac_err = 1. - min(z_denorm.frac_error(key='Z07'), 1.)
x_frac_err = 1. - min(z_denorm.frac_error(key='Z08'), 1.)
cc_y_corr = -self.m2_gain * y_frac_err * z_denorm['Z07'] / self.secondary.theta_cc
cc_x_corr = -self.m2_gain * x_frac_err * z_denorm['Z08'] / self.secondary.theta_cc
return cc_x_corr.round(3), cc_y_corr.round(3)
def calculate_recenter(self, fit_results, defoc=1.0):
"""
Perform zero-coma hexapod tilts to align the pupil center to the center-of-rotation.
The location of the CoR is configured to be at self.cor_coords.
"""
xc = fit_results['xcen']
yc = fit_results['ycen']
xref = self.cor_coords[0]
yref = self.cor_coords[1]
dx = xc - xref
dy = yc - yref
total_rotation = u.Quantity(self.rotation - fit_results['rotator'], u.rad).value
dr, phi = cart2pol([dx, dy])
derot_phi = phi + total_rotation
az, el = pol2cart([dr, derot_phi])
az *= self.az_parity * self.pix_size * defoc # pix size scales with the pupil size as focus changes.
el *= self.el_parity * self.pix_size * defoc
return az.round(3), el.round(3)
def clear_m1_corrections(self):
"""
Clear corrections applied to the primary mirror. This includes the 'm1spherical' offsets sent to the secondary.
"""
log.info("Clearing WFS corrections from M1 and m1spherical offsets from M2.")
clear_forces, clear_m1focus = self.telescope.clear_forces()
return clear_forces, clear_m1focus
def clear_m2_corrections(self):
"""
Clear corrections sent to the secondary mirror, specifically the 'wfs' offsets.
"""
log.info("Clearing WFS offsets from M2's hexapod.")
cmds = self.secondary.clear_wfs()
return cmds
def clear_corrections(self):
"""
Clear all applied WFS corrections
"""
forces, m1focus = self.clear_m1_corrections()
cmds = self.clear_m2_corrections()
return forces, m1focus, cmds
def connect(self):
"""
Set state to connected
"""
self.telescope.connect()
self.secondary.connect()
if self.telescope.connected and self.secondary.connected:
self.connected = True
else:
self.connected = False
def disconnect(self):
"""
Set state to disconnected
"""
self.telescope.disconnect()
self.secondary.disconnect()
self.connected = False
class F9(WFS):
"""
Defines configuration and methods specific to the F/9 WFS system
"""
def __init__(self, config={}, plot=True):
super(F9, self).__init__(config=config, plot=plot)
self.connected = False
# set up CompMirror object
self.compmirror = CompMirror()
def connect(self):
"""
Run parent connect() method and then connect to the topbox if we can connect to the rest.
"""
super(F9, self).connect()
if self.connected:
self.compmirror.connect()
def disconnect(self):
"""
Run parent disconnect() method and then disconnect the topbox
"""
super(F9, self).disconnect()
self.compmirror.disconnect()
class NewF9(F9):
"""
Defines configuration and methods specific to the F/9 WFS system with the new SBIG CCD
"""
def process_image(self, fitsfile):
"""
Process the image to make it suitable for accurate wavefront analysis. Steps include nuking cosmic rays,
subtracting background, handling overscan regions, etc.
"""
rawdata, hdr = check_wfsdata(fitsfile, header=True)
cr_mask, data = detect_cosmics(rawdata, sigclip=15., niter=5, cleantype='medmask', psffwhm=10.)
# calculate the background and subtract it
bkg_estimator = photutils.ModeEstimatorBackground()
mask = photutils.make_source_mask(data, nsigma=2, npixels=7, dilate_size=13)
bkg = photutils.Background2D(data, (50, 50), filter_size=(15, 15), bkg_estimator=bkg_estimator, mask=mask)
data -= bkg.background
return data, hdr
class F5(WFS):
"""
Defines configuration and methods specific to the F/5 WFS systems
"""
def __init__(self, config={}, plot=True):
super(F5, self).__init__(config=config, plot=plot)
self.connected = False
self.sock = None
# load lookup table for off-axis aberrations
self.aberr_table = ascii.read(self.aberr_table_file)
def process_image(self, fitsfile):
"""
Process the image to make it suitable for accurate wavefront analysis. Steps include nuking cosmic rays,
subtracting background, handling overscan regions, etc.
"""
rawdata, hdr = check_wfsdata(fitsfile, header=True)
trimdata = self.trim_overscan(rawdata, hdr=hdr)
cr_mask, data = detect_cosmics(trimdata, sigclip=15., niter=5, cleantype='medmask', psffwhm=10.)
# calculate the background and subtract it
bkg_estimator = photutils.ModeEstimatorBackground()
mask = photutils.make_source_mask(data, nsigma=2, npixels=5, dilate_size=11)
bkg = photutils.Background2D(data, (20, 20), filter_size=(10, 10), bkg_estimator=bkg_estimator, mask=mask)
data -= bkg.background
return data, hdr
def ref_pupil_location(self, mode, hdr=None):
"""
For now we set the F/5 wfs center by hand based on engineering data. Should determine this more carefully.
"""
x = 262.0
y = 259.0
return x, y
def focal_plane_position(self, hdr):
"""
Need to fill this in for the hecto f/5 WFS system. For now will assume it's always on-axis.
"""
return 0.0 * u.deg, 0.0 * u.deg
def calculate_recenter(self, fit_results, defoc=1.0):
"""
Perform zero-coma hexapod tilts to align the pupil center to the center-of-rotation.
The location of the CoR is configured to be at self.cor_coords.
"""
xc = fit_results['xcen']
yc = fit_results['ycen']
xref = self.cor_coords[0]
yref = self.cor_coords[1]
dx = xc - xref
dy = yc - yref
cam_rotation = self.rotation - 90 * u.deg # pickoff plus fold mirror makes a 90 deg rotation
total_rotation = u.Quantity(cam_rotation - fit_results['rotator'], u.rad).value
dr, phi = cart2pol([dx, -dy]) # F/5 camera needs an up/down flip
derot_phi = phi + total_rotation
az, el = pol2cart([dr, derot_phi])
az *= self.az_parity * self.pix_size * defoc # pix size scales with the pupil size as focus changes.
el *= self.el_parity * self.pix_size * defoc
return az.round(3), el.round(3)
def reference_aberrations(self, mode, hdr=None):
"""
Create reference ZernikeVector for 'mode'. Pass 'hdr' to self.focal_plane_position() to get position of
the WFS when the data was acquired.
"""
# for most cases, this gets the reference focus
z_default = ZernikeVector(**self.modes[mode]['ref_zern'])
# now get the off-axis aberrations
z_offaxis = ZernikeVector()
if hdr is None:
log.warning("Missing WFS header. Assuming data is acquired on-axis.")
field_r = 0.0 * u.deg
field_phi = 0.0 * u.deg
else:
field_r, field_phi = self.focal_plane_position(hdr)
# ignore piston and x/y tilts
for i in range(4, 12):
k = "Z%02d" % i
z_offaxis[k] = np.interp(field_r.to(u.deg).value, self.aberr_table['field_r'], self.aberr_table[k]) * u.um
# remove the 90 degree offset between the MMT and zernike conventions and then rotate the offaxis aberrations
z_offaxis.rotate(angle=field_phi - 90. * u.deg)
z = z_default + z_offaxis
return z
class Binospec(F5):
"""
Defines configuration and methods specific to the Binospec WFS system. Binospec uses the same aberration table
as the F5 system so we inherit from that.
"""
def get_flipud(self, mode):
"""
Method to determine if the WFS image needs to be flipped up/down
During the first binospec commissioning run the images were flipped u/d as they came in. Since then, they are
left as-is and get flipped internally based on this flag. The reference file is already flipped.
"""
return True
def ref_pupil_location(self, mode, hdr=None):
"""
If a header is passed in, use Jan Kansky's linear relations to get the pupil center on the reference image.
Otherwise, use the default method.
"""
if hdr is None:
ref = self.modes[mode]['reference']
x = ref.xcen
y = ref.ycen
else:
for k in ['STARXMM', 'STARYMM']:
if k not in hdr:
# we'll be lenient for now with missing header info. if not provided, assume we're on-axis.
msg = f"Missing value, {k}, that is required to transform Binospec guider coordinates. Defaulting to 0.0."
log.warning(msg)
hdr[k] = 0.0
y = 232.771 + 0.17544 * hdr['STARXMM']
x = 265.438 + -0.20406 * hdr['STARYMM'] + 12.0
return x, y
def focal_plane_position(self, hdr):
"""
Transform from the Binospec guider coordinate system to MMTO focal plane coordinates.
"""
for k in ['ROT', 'STARXMM', 'STARYMM']:
if k not in hdr:
# we'll be lenient for now with missing header info. if not provided, assume we're on-axis.
msg = f"Missing value, {k}, that is required to transform Binospec guider coordinates. Defaulting to 0.0."
log.warning(msg)
hdr[k] = 0.0
guide_x = hdr['STARXMM']
guide_y = hdr['STARYMM']
rot = hdr['ROT']
guide_r = np.sqrt(guide_x**2 + guide_y**2) * u.mm
rot = u.Quantity(rot, u.deg) # make sure rotation is cast to degrees
# the MMTO focal plane coordinate convention has phi=0 aligned with +Y instead of +X
if guide_y != 0.0:
guide_phi = np.arctan2(guide_x, guide_y) * u.rad
else:
guide_phi = 90. * u.deg
# transform radius in guider coords to degrees in focal plane
focal_r = (guide_r / self.secondary.plate_scale).to(u.deg)
focal_phi = guide_phi + rot + self.rotation
log.debug(f"guide_phi: {guide_phi.to(u.rad)} rot: {rot}")
return focal_r, focal_phi
def in_wfs_region(self, xw, yw, x, y):
"""
Determine if a position is within the region available to Binospec's WFS
"""
return True # placeholder until the optical prescription is implemented
def pupil_mask(self, hdr, npts=14):
"""
Generate a synthetic pupil mask
"""
if hdr is not None:
x_wfs = hdr.get('STARXMM', 150.0)
y_wfs = hdr.get('STARYMM', 0.0)
else:
x_wfs = 150.0
y_wfs = 0.0
log.warning("Header information not available for Binospec pupil mask. Assuming default position.")
good = []
center = self.pup_size / 2.
obsc = self.telescope.obscuration.value
spacing = 2.0 / npts
for x in np.arange(-1, 1, spacing):
for y in np.arange(-1, 1, spacing):
r = np.hypot(x, y)
if (r < 1 and np.hypot(x, y) >= obsc):
if self.in_wfs_region(x_wfs, y_wfs, x, y):
x_impos = center * (x + 1.)
y_impos = center * (y + 1.)
amp = 1.
# this is kind of a hacky way to dim spots near the edge, but easier than doing full calc
# of the aperture intersection with pupil. it also doesn't need to be that accurate for the
# purposes of the cross-correlation used to register the pupil.
if r > 1. - spacing:
amp = 1. - (r - (1. - spacing)) / spacing
if r - obsc < spacing:
amp = (r - obsc) / spacing
good.append((amp, x_impos, y_impos))
yi, xi = np.mgrid[0:self.pup_size, 0:self.pup_size]
im = np.zeros((self.pup_size, self.pup_size))
sigma = 3.
for g in good:
im += Gaussian2D(g[0], g[1], g[2], sigma, sigma)(xi, yi)
# Measured by hand from reference LED image
cam_rot = 0.595
im_rot = rotate(im, cam_rot, reshape=False)
im_rot[im_rot < 1e-2] = 0.0
return im_rot
class MMIRS(F5):
"""
Defines configuration and methods specific to the MMIRS WFS system
"""
def __init__(self, config={}, plot=True):
super(MMIRS, self).__init__(config=config, plot=plot)
# Parameters describing MMIRS pickoff mirror geometry
# Location and diameter of exit pupil
# Determined by tracing chief ray at 7.2' field angle with mmirs_asbuiltoptics_20110107_corronly.zmx
self.zp = 71.749 / 0.02714
self.dp = self.zp / 5.18661 # Working f/# from Zemax file
# Location of fold mirror
self.zm = 114.8
# Angle of fold mirror
self.am = 42 * u.deg
# Following dimensions from drawing MMIRS-1233_Rev1.pdf
# Diameter of pickoff mirror
self.pickoff_diam = (6.3 * u.imperial.inch).to(u.mm).value
# X size of opening in pickoff mirror
self.pickoff_xsize = (3.29 * u.imperial.inch).to(u.mm).value
# Y size of opening in pickoff mirror
self.pickoff_ysize = (3.53 * u.imperial.inch).to(u.mm).value
# radius of corner in pickoff mirror
self.pickoff_rcirc = (0.4 * u.imperial.inch).to(u.mm).value
def mirrorpoint(self, x0, y0, x, y):
"""
Compute intersection of ray with pickoff mirror.
The ray leaves the exit pupil at position x,y and hits the focal surface at x0,y0.
Math comes from http://geomalgorithms.com/a05-_intersect-1.html
"""
# Point in focal plane
P0 = np.array([x0, y0, 0])
# Point in exit pupil
P1 = np.array([x * self.dp / 2, y * self.dp / 2, self.zp])
# Pickoff mirror intersection with optical axis
V0 = np.array([0, 0, self.zm])
# normal to mirror
if (x0 < 0):
n = np.array([-np.sin(self.am), 0, np.cos(self.am)])
else:
n = np.array([np.sin(self.am), 0, np.cos(self.am)])
w = P0 - V0
# Vector connecting P0 to P1
u = P1 - P0
# Distance from P0 to intersection as a fraction of abs(u)
s = -n.dot(w) / n.dot(u)
# Intersection point on mirror
P = P0 + s * u
return (P[0], P[1])
def onmirror(self, x, y, side):
"""
Determine if a point is on the pickoff mirror surface:
x,y = coordinates of ray
side=1 means right face of the pickoff mirror, -1=left face
"""
if np.hypot(x, y) > self.pickoff_diam / 2.:
return False
if x * side < 0:
return False
x = abs(x)
y = abs(y)
if ((x > self.pickoff_xsize/2) or (y > self.pickoff_ysize/2)
or (x > self.pickoff_xsize/2 - self.pickoff_rcirc and y > self.pickoff_ysize/2 - self.pickoff_rcirc
and np.hypot(x - (self.pickoff_xsize/2 - self.pickoff_rcirc),
y - (self.pickoff_ysize/2 - self.pickoff_rcirc)) > self.pickoff_rcirc)):
return True
else:
return False
def drawoutline(self, ax):
"""
Draw outline of MMIRS pickoff mirror onto matplotlib axis, ax
"""
circ = np.arange(360) * u.deg
ax.plot(np.cos(circ) * self.pickoff_diam/2, np.sin(circ) * self.pickoff_diam/2, "b")
ax.set_aspect('equal', 'datalim')
ax.plot(
[-(self.pickoff_xsize/2 - self.pickoff_rcirc), (self.pickoff_xsize/2 - self.pickoff_rcirc)],
[self.pickoff_ysize/2, self.pickoff_ysize/2],
"b"
)
ax.plot(
[-(self.pickoff_xsize/2 - self.pickoff_rcirc), (self.pickoff_xsize/2 - self.pickoff_rcirc)],
[-self.pickoff_ysize/2, -self.pickoff_ysize/2],
"b"
)
ax.plot(
[-(self.pickoff_xsize/2), -(self.pickoff_xsize/2)],
[self.pickoff_ysize/2 - self.pickoff_rcirc, -(self.pickoff_ysize/2 - self.pickoff_rcirc)],
"b"
)
ax.plot(
[(self.pickoff_xsize/2), (self.pickoff_xsize/2)],
[self.pickoff_ysize/2 - self.pickoff_rcirc, -(self.pickoff_ysize/2 - self.pickoff_rcirc)],
"b"
)
ax.plot(
np.cos(circ[0:90]) * self.pickoff_rcirc + self.pickoff_xsize/2 - self.pickoff_rcirc,
np.sin(circ[0:90]) * self.pickoff_rcirc + self.pickoff_ysize/2 - self.pickoff_rcirc,
"b"
)
ax.plot(
np.cos(circ[90:180]) * self.pickoff_rcirc - self.pickoff_xsize/2 + self.pickoff_rcirc,
np.sin(circ[90:180]) * self.pickoff_rcirc + self.pickoff_ysize/2 - self.pickoff_rcirc,
"b"
)
ax.plot(
np.cos(circ[180:270]) * self.pickoff_rcirc - self.pickoff_xsize/2 + self.pickoff_rcirc,
np.sin(circ[180:270]) * self.pickoff_rcirc - self.pickoff_ysize/2 + self.pickoff_rcirc,
"b"
)
ax.plot(
np.cos(circ[270:360]) * self.pickoff_rcirc + self.pickoff_xsize/2 - self.pickoff_rcirc,
np.sin(circ[270:360]) * self.pickoff_rcirc - self.pickoff_ysize/2 + self.pickoff_rcirc,
"b"
)
ax.plot([0, 0], [self.pickoff_ysize/2, self.pickoff_diam/2], "b")
ax.plot([0, 0], [-self.pickoff_ysize/2, -self.pickoff_diam/2], "b")
def plotgrid(self, x0, y0, ax, npts=15):
"""
Plot a grid of points representing Shack-Hartmann apertures corresponding to wavefront sensor positioned at
a focal plane position of x0, y0 mm. This position is written in the FITS header keywords GUIDERX and GUIDERY.
"""
ngood = 0
for x in np.arange(-1, 1, 2.0 / npts):
for y in np.arange(-1, 1, 2.0 / npts):
if (np.hypot(x, y) < 1 and np.hypot(x, y)
# -*- coding: utf-8 -*-
# Copyright (c) 2019 the HERA Project
# Licensed under the MIT License
import pytest
import os
import shutil
import hera_qm.xrfi as xrfi
import numpy as np
import pyuvdata.tests as uvtest
from pyuvdata import UVData
from pyuvdata import UVCal
import hera_qm.utils as utils
from hera_qm.data import DATA_PATH
from pyuvdata import UVFlag
import glob
test_d_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA')
test_uvfits_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.uvfits')
test_uvh5_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvh5')
test_c_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.omni.calfits')
test_f_file = test_d_file + '.testuvflag.h5'
test_f_file_flags = test_d_file + '.testuvflag.flags.h5' # version in 'flag' mode
test_outfile = os.path.join(DATA_PATH, 'test_output', 'uvflag_testout.h5')
xrfi_path = os.path.join(DATA_PATH, 'test_output')
test_flag_integrations= os.path.join(DATA_PATH, 'a_priori_flags_integrations.yaml')
test_flag_jds= os.path.join(DATA_PATH, 'a_priori_flags_jds.yaml')
test_flag_lsts= os.path.join(DATA_PATH, 'a_priori_flags_lsts.yaml')
test_uvh5_files = ['zen.2457698.40355191.xx.HH.uvh5',
'zen.2457698.40367619.xx.HH.uvh5',
'zen.2457698.40380046.xx.HH.uvh5']
test_c_files = ['zen.2457698.40355191.xx.HH.uvcAA.omni.calfits',
'zen.2457698.40367619.xx.HH.uvcAA.omni.calfits',
'zen.2457698.40380046.xx.HH.uvcAA.omni.calfits']
for cnum, cf, uvf in zip(range(3), test_c_files, test_uvh5_files):
test_c_files[cnum] = os.path.join(DATA_PATH, cf)
test_uvh5_files[cnum] = os.path.join(DATA_PATH, uvf)
pytestmark = pytest.mark.filterwarnings(
"ignore:The uvw_array does not match the expected values given the antenna positions.",
"ignore:telescope_location is not set. Using known values for HERA.",
"ignore:antenna_positions is not set. Using known values for HERA."
)
def test_uvdata():
uv = UVData()
uv.read_miriad(test_d_file)
xant = uv.get_ants()[0]
xrfi.flag_xants(uv, xant)
assert np.all(uv.flag_array[uv.ant_1_array == xant, :, :, :])
assert np.all(uv.flag_array[uv.ant_2_array == xant, :, :, :])
def test_uvcal():
uvc = UVCal()
uvc.read_calfits(test_c_file)
xant = uvc.ant_array[0]
xrfi.flag_xants(uvc, xant)
assert np.all(uvc.flag_array[0, :, :, :, :])
def test_uvflag():
uvf = UVFlag(test_f_file)
uvf.to_flag()
xant = uvf.ant_1_array[0]
xrfi.flag_xants(uvf, xant)
assert np.all(uvf.flag_array[uvf.ant_1_array == xant, :, :, :])
assert np.all(uvf.flag_array[uvf.ant_2_array == xant, :, :, :])
def test_input_error():
pytest.raises(ValueError, xrfi.flag_xants, 4, 0)
def test_uvflag_waterfall_error():
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
uvf.to_flag()
pytest.raises(ValueError, xrfi.flag_xants, uvf, 0)
def test_uvflag_not_flag_error():
uvf = UVFlag(test_f_file)
pytest.raises(ValueError, xrfi.flag_xants, uvf, 0)
def test_not_inplace_uvflag():
uvf = UVFlag(test_f_file)
xant = uvf.ant_1_array[0]
uvf2 = xrfi.flag_xants(uvf, xant, inplace=False)
assert np.all(uvf2.flag_array[uvf2.ant_1_array == xant, :, :, :])
assert np.all(uvf2.flag_array[uvf2.ant_2_array == xant, :, :, :])
def test_not_inplace_uvdata():
uv = UVData()
uv.read_miriad(test_d_file)
xant = uv.get_ants()[0]
uv2 = xrfi.flag_xants(uv, xant, inplace=False)
assert np.all(uv2.flag_array[uv2.ant_1_array == xant, :, :, :])
assert np.all(uv2.flag_array[uv2.ant_2_array == xant, :, :, :])
def test_resolve_xrfi_path_given():
dirname = xrfi.resolve_xrfi_path(xrfi_path, test_d_file)
assert xrfi_path == dirname
def test_resolve_xrfi_path_empty():
dirname = xrfi.resolve_xrfi_path('', test_d_file)
assert os.path.dirname(os.path.abspath(test_d_file)) == dirname
def test_resolve_xrfi_path_does_not_exist():
dirname = xrfi.resolve_xrfi_path(os.path.join(xrfi_path, 'foogoo'), test_d_file)
assert os.path.dirname(os.path.abspath(test_d_file)) == dirname
def test_resolve_xrfi_path_jd_subdir():
dirname = xrfi.resolve_xrfi_path('', test_d_file, jd_subdir=True)
expected_dir = os.path.join(os.path.dirname(os.path.abspath(test_d_file)),
'.'.join(os.path.basename(test_d_file).split('.')[0:3])
+ '.xrfi')
assert dirname == expected_dir
assert os.path.exists(expected_dir)
shutil.rmtree(expected_dir)
def test_check_convolve_dims_3D():
# Error if d.ndims != 2
pytest.raises(ValueError, xrfi._check_convolve_dims, np.ones((3, 2, 3)), 1, 2)
def test_check_convolve_dims_1D():
size = 10
d = np.ones(size)
with uvtest.check_warnings(
UserWarning,
match=f"K1 value {size + 1} is larger than the data",
nwarnings=1
):
K = xrfi._check_convolve_dims(d, size + 1)
assert K == size
def test_check_convolve_dims_kernel_not_given():
size = 10
d = np.ones((size, size))
with uvtest.check_warnings(
UserWarning,
match=["No K1 input provided.", "No K2 input provided"],
nwarnings=2
):
K1, K2 = xrfi._check_convolve_dims(d)
assert K1 == size
assert K2 == size
def test_check_convolve_dims_Kt_too_big():
size = 10
d = np.ones((size, size))
with uvtest.check_warnings(
UserWarning,
match=f"K1 value {size + 1} is larger than the data",
nwarnings=1,
):
Kt, Kf = xrfi._check_convolve_dims(d, size + 1, size)
assert Kt == size
assert Kf == size
def test_check_convolve_dims_Kf_too_big():
size = 10
d = np.ones((size, size))
with uvtest.check_warnings(
UserWarning,
match=f"K2 value {size + 1} is larger than the data",
nwarnings=1,
):
Kt, Kf = xrfi._check_convolve_dims(d, size, size + 1)
assert Kt == size
assert Kf == size
def test_check_convolve_dims_K1K2_lt_one():
size = 10
data = np.ones((size, size))
pytest.raises(ValueError, xrfi._check_convolve_dims, data, 0, 2)
pytest.raises(ValueError, xrfi._check_convolve_dims, data, 2, 0)
def test_robust_divide():
a = np.array([1., 1., 1.], dtype=np.float32)
b = np.array([2., 0., 1e-9], dtype=np.float32)
c = xrfi.robust_divide(a, b)
assert np.array_equal(c, np.array([1. / 2., np.inf, np.inf]))
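# As the test above documents, robust_divide() returns np.inf wherever the
# denominator is zero or negligibly small compared to floating-point precision,
# instead of raising divide-by-zero warnings or returning huge unstable values.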
@pytest.fixture(scope='function')
def fake_data():
size = 100
fake_data = np.zeros((size, size))
# yield returns the data and lets us do post test clean up after
yield fake_data
# post-test clean up
del(fake_data)
return
def test_medmin(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i * np.ones_like(fake_data[:, i])
# medmin should be .size - 1 for these data
medmin = xrfi.medmin(fake_data)
assert np.allclose(medmin, fake_data.shape[0] - 1)
# Test error when wrong dimensions are passed
pytest.raises(ValueError, xrfi.medmin, np.ones((5, 4, 3)))
def test_medminfilt(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i * np.ones_like(fake_data[:, i])
# run medmin filt
Kt = 8
Kf = 8
d_filt = xrfi.medminfilt(fake_data, Kt=Kt, Kf=Kf)
# build up "answer" array
ans = np.zeros_like(fake_data)
for i in range(fake_data.shape[1]):
if i < fake_data.shape[0] - Kf:
ans[:, i] = i + (Kf - 1)
else:
ans[:, i] = fake_data.shape[0] - 1
assert np.allclose(d_filt, ans)
def test_detrend_deriv(fake_data):
# make fake data
for i in range(fake_data.shape[0]):
for j in range(fake_data.shape[1]):
fake_data[i, j] = j * i**2 + j**3
# run detrend_deriv in both dimensions
dtdf = xrfi.detrend_deriv(fake_data, df=True, dt=True)
ans = np.ones_like(dtdf)
assert np.allclose(dtdf, ans)
# only run along frequency
for i in range(fake_data.shape[0]):
for j in range(fake_data.shape[1]):
fake_data[i, j] = j**3
df = xrfi.detrend_deriv(fake_data, df=True, dt=False)
ans = np.ones_like(df)
assert np.allclose(df, ans)
# only run along time
for i in range(fake_data.shape[0]):
for j in range(fake_data.shape[1]):
fake_data[i, j] = i**3
dt = xrfi.detrend_deriv(fake_data, df=False, dt=True)
ans = np.ones_like(dt)
assert np.allclose(dt, ans)
# catch error of df and dt both being False
pytest.raises(ValueError, xrfi.detrend_deriv, fake_data, dt=False, df=False)
# Test error when wrong dimensions are passed
pytest.raises(ValueError, xrfi.detrend_deriv, np.ones((5, 4, 3)))
def test_detrend_medminfilt(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i * np.ones_like(fake_data[:, i])
# run detrend_medminfilt
Kt = 8
Kf = 8
dm = xrfi.detrend_medminfilt(fake_data, Kt=Kt, Kf=Kf)
# read in "answer" array
# this is output that corresponds to .size==100, Kt==8, Kf==8
ans_fn = os.path.join(DATA_PATH, 'test_detrend_medminfilt_ans.txt')
ans = np.loadtxt(ans_fn)
assert np.allclose(ans, dm)
def test_detrend_medfilt():
# make fake data
x = np.sin(np.linspace(0, 2.1 * np.pi, 100))
y = np.cos(np.linspace(0, 5.3 * np.pi, 100))
fake_data = np.outer(x,y)
# run detrend medfilt
Kt = 101
Kf = 101
with uvtest.check_warnings(
UserWarning,
match=[
f"K1 value {Kt} is larger than the data",
f"K2 value {Kf} is larger than the data",
],
nwarnings=2,
):
dm = xrfi.detrend_medfilt(fake_data, None, Kt, Kf)
# read in "answer" array
# this is output that corresponds to .size==100, Kt==101, Kf==101
ans_fn = os.path.join(DATA_PATH, 'test_detrend_medfilt_ans_v2.txt')
ans = np.loadtxt(ans_fn)
np.testing.assert_array_almost_equal(ans, dm)
def test_detrend_medfilt_complex():
# use complex data
x = np.sin(np.linspace(0, 2.1 * np.pi, 100)) + 1.0j * np.cos(np.linspace(0, 1.3 * np.pi, 100))
y = np.cos(np.linspace(0, 5.3 * np.pi, 100)) + 1.0j * np.sin(np.linspace(0, 2.9 * np.pi, 100))
fake_data = np.outer(x,y)
# run detrend_medfilt
Kt = 8
Kf = 8
dm = xrfi.detrend_medfilt(fake_data, Kt=Kt, Kf=Kf)
# read in "answer" array
# this is output that corresponds to .size=100, Kt=8, Kf=8
ans_fn = os.path.join(DATA_PATH, 'test_detrend_medfilt_complex_ans_v2.txt')
ans = np.loadtxt(ans_fn).view('complex')
np.testing.assert_array_almost_equal(ans, dm)
def test_detrend_medfilt_3d_error():
# Test error when wrong dimensions are passed
pytest.raises(ValueError, xrfi.detrend_medfilt, np.ones((5, 4, 3)))
def test_detrend_meanfilt(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i**2 * np.ones_like(fake_data[:, i])
# run detrend medfilt
Kt = 8
Kf = 8
dm = xrfi.detrend_meanfilt(fake_data, Kt=Kt, Kf=Kf)
# read in "answer" array
# this is output that corresponds to .size==100, Kt==8, Kf==8
ans_fn = os.path.join(DATA_PATH, 'test_detrend_meanfilt_ans.txt')
ans = np.loadtxt(ans_fn)
assert np.allclose(ans, dm)
def test_detrend_meanfilt_flags(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i * np.ones_like(fake_data[:, i])
ind = int(fake_data.shape[0] / 2)
fake_data[ind, :] = 10000.
flags = np.zeros(fake_data.shape, dtype=np.bool_)
flags[ind, :] = True
# run detrend medfilt
Kt = 8
Kf = 8
dm1 = xrfi.detrend_meanfilt(fake_data, flags=flags, Kt=Kt, Kf=Kf)
# Compare with drastically different flagged values
fake_data[ind, :] = 0
dm2 = xrfi.detrend_meanfilt(fake_data, flags=flags, Kt=Kt, Kf=Kf)
dm2[ind, :] = dm1[ind, :] # These don't have valid values, so don't compare them.
assert np.allclose(dm1, dm2)
def test_zscore_full_array(fake_data):
# Make some fake data
np.random.seed(182)
fake_data[...] = np.random.randn(fake_data.shape[0], fake_data.shape[1])
out = xrfi.zscore_full_array(fake_data)
fake_mean = np.mean(fake_data)
fake_std = np.std(fake_data)
assert np.all(out == (fake_data - fake_mean) / fake_std)
def test_zscore_full_array_flags(fake_data):
# Make some fake data
np.random.seed(182)
fake_data[...] = np.random.randn(fake_data.shape[0], fake_data.shape[1])
flags = np.zeros(fake_data.shape, dtype=np.bool_)
flags[45, 33] = True
out = xrfi.zscore_full_array(fake_data, flags=flags)
fake_mean = np.mean(np.ma.masked_array(fake_data, flags))
fake_std = np.std(np.ma.masked_array(fake_data, flags))
out_exp = (fake_data - fake_mean) / fake_std
out_exp[45, 33] = np.inf
assert np.all(out == out_exp)
def test_zscore_full_array_modified(fake_data):
# Make some fake data
np.random.seed(182)
fake_data[...] = np.random.randn(fake_data.shape[0], fake_data.shape[1])
out = xrfi.zscore_full_array(fake_data, modified=True)
fake_med = np.median(fake_data)
fake_mad = np.median(np.abs(fake_data - fake_med))
assert np.all(out == (fake_data - fake_med) / (1.486 * fake_mad))
def test_zscore_full_array_modified_complex(fake_data):
# Make some fake data
np.random.seed(182)
rands = np.random.randn(100, 100)
fake_data = rands + 1j * rands
out = xrfi.zscore_full_array(fake_data, modified=True)
fake_med = np.median(rands)
fake_mad = np.sqrt(2) * np.median(np.abs(rands - fake_med))
assert np.allclose(out, (fake_data - fake_med - 1j * fake_med) / (1.486 * fake_mad))
def test_modzscore_1d_no_detrend():
npix = 1000
np.random.seed(182)
data = np.random.randn(npix)
data[50] = 500
out = xrfi.modzscore_1d(data, detrend=False)
assert out.shape == (npix,)
assert np.isclose(out[50], 500, rtol=.2)
assert np.isclose(np.median(np.abs(out)), .67, rtol=.1)
def test_modzscore_1d():
npix = 1000
np.random.seed(182)
data = np.random.randn(npix)
data[50] = 500
data += .1 * np.arange(npix)
out = xrfi.modzscore_1d(data)
assert out.shape == (npix,)
assert np.isclose(out[50], 500, rtol=.2)
assert np.isclose(np.median(np.abs(out)), .67, rtol=.1)
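# Illustrative sketch (not part of the original test suite): the 0.67 expectation above
# and the 1.486 factor used in the zscore tests both come from the Gaussian distribution,
# where the median absolute deviation (MAD) satisfies MAD ~= 0.6745 * sigma, so
# 1.4826 * MAD ~= sigma. The helper below only assumes numpy and checks that relation.
def _demo_mad_to_sigma():
    rng = np.random.default_rng(0)
    x = rng.standard_normal(100000)
    mad = np.median(np.abs(x - np.median(x)))
    # For standard-normal data the rescaled MAD should be close to the true sigma of 1.
    return 1.4826 * mad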
def test_watershed_flag():
    # generate metric and flag UVFlag objects
uv = UVData()
uv.read_miriad(test_d_file)
uvm = UVFlag(uv, history='I made this')
uvf = UVFlag(uv, mode='flag')
# set metric and flag arrays to specific values
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
uvm.metric_array[0, 0, 1, 0] = 7.
uvf.flag_array[0, 0, 0, 0] = True
# run watershed flag
xrfi.watershed_flag(uvm, uvf, nsig_p=2., inplace=True)
# check answer
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[0, 0, :2, 0] = True
assert np.allclose(uvf.flag_array, flag_array)
# test flagging channels adjacent to fully flagged ones
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
uvm.metric_array[:, :, 1, :] = 1.
uvf.flag_array[:, :, 0, :] = True
# run watershed flag
xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_f=0.5, inplace=True)
# check answer
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[:, :, :2, :] = True
assert np.allclose(uvf.flag_array, flag_array)
# test flagging times adjacent to fully flagged ones
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
times = np.unique(uv.time_array)
inds1 = np.where(uv.time_array == times[0])[0]
inds2 = np.where(uv.time_array == times[1])[0]
uvm.metric_array[inds2, 0, :, 0] = 1.
uvf.flag_array[inds1, 0, :, 0] = True
# run watershed flag
xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_t=0.5, inplace=True)
# check answer
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[inds1, 0, :, 0] = True
flag_array[inds2, 0, :, 0] = True
assert np.allclose(uvf.flag_array, flag_array)
# test antenna type objects
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvm = UVFlag(uvc, history='I made this')
uvf = UVFlag(uvc, mode='flag')
# set metric and flag arrays to specific values
    uvm.metric_array = np.zeros_like(uvm.metric_array)
"""Unit test for the contents of regression_analysis.utils.findStat."""
import numpy as np
import sklearn.metrics as sm
from regression_analysis.fit_model import linear_regression
from regression_analysis.utils import franke, findStat
def get_model_output():
# Get data from Franke function
x1, x2, y = franke.create_data(num_points=100, noise_variance=0)
# Get design matrix
X = linear_regression.design_mat2D(np.squeeze(x1), np.squeeze(x2), 5)
# Get betas from scikit
beta_OLS = linear_regression.find_model_parameter(X, y, "scikit_ols", 0)
beta_RR = linear_regression.find_model_parameter(X, y, "scikit_ridge", 1)
beta_LR = linear_regression.find_model_parameter(X, y, "scikit_lasso", 1)
# Get y
y_OLS = X @ beta_OLS
y_RR = X @ beta_RR
y_LR = X @ beta_LR
return y, y_OLS, y_RR, y_LR
def test_findMSE():
"""Test if the MSE from our code is the same as the one from scikit-learn."""
y, y_OLS, y_RR, y_LR = get_model_output()
# Calculate mean squared error
MSE_OLS_own = findStat.findMSE(y, y_OLS)
MSE_RR_own = findStat.findMSE(y, y_RR)
MSE_LR_own = findStat.findMSE(y, y_LR)
MSE_OLS_scikit = sm.mean_squared_error(y, y_OLS)
MSE_RR_scikit = sm.mean_squared_error(y, y_RR)
MSE_LR_scikit = sm.mean_squared_error(y, y_LR)
np.testing.assert_array_equal(MSE_OLS_own, MSE_OLS_scikit)
    np.testing.assert_array_equal(MSE_RR_own, MSE_RR_scikit)
    np.testing.assert_array_equal(MSE_LR_own, MSE_LR_scikit)
import cv2
import numpy as np
from scipy.ndimage.measurements import label
from code.features import FeatureExtractor
from collections import deque
HEAT_INCREMENT = 10
class VehicleDetector:
def __init__(self, svc, scaler, n_rows, n_cols, config, buffer_size = 8):
self.svc = svc
self.scaler = scaler
self.n_rows = n_rows
self.n_cols = n_cols
#self.orientations = config["orientations"]
self.pix_per_cell = config["pix_per_cell"]
self.cell_per_block = config["cell_per_block"]
self.spatial_size = config["spatial_size"]
self.histogram_bins = config["histogram_bins"]
self.window = config["window"]
n_rows_min = int(n_rows / 1.8)
n_cols_min = 100
self.search_parameters = [(n_rows_min, (n_rows_min + 200), n_cols // 2, n_cols, 1.5, 2),
(n_rows_min, (n_rows_min + 250), n_cols_min, n_cols, 2, 1)]
self.config = config
self.heatmap_buffer = deque(maxlen = buffer_size)
self.feature_extractor = FeatureExtractor(config)
def _image_region_search(self, image_region, v_min, h_min, scale, cells_per_step, cpu_pool = None):
if scale != 1.0:
if scale > 1.0:
interpolation = cv2.INTER_AREA
else:
interpolation = cv2.INTER_LINEAR
            image_region = cv2.resize(image_region, (int(image_region.shape[1] / scale), int(image_region.shape[0] / scale)), interpolation = interpolation)
n_hblocks = (image_region.shape[1] // self.pix_per_cell) - self.cell_per_block + 1
n_vblocks = (image_region.shape[0] // self.pix_per_cell) - self.cell_per_block + 1
n_blocks_per_window = (self.window // self.pix_per_cell) - self.cell_per_block + 1
h_steps = (n_hblocks - n_blocks_per_window) // cells_per_step + 1
v_steps = (n_vblocks - n_blocks_per_window) // cells_per_step + 1
windows = []
predictions = []
for h_step in range(h_steps):
for v_step in range(v_steps):
h_pos = h_step * cells_per_step
v_pos = v_step * cells_per_step
window_min_h = h_pos * self.pix_per_cell
window_min_v = v_pos * self.pix_per_cell
image_window = image_region[window_min_v:window_min_v + self.window , window_min_h:window_min_h + self.window]
if (image_window.shape[0] < self.window) or (image_window.shape[1] < self.window):
                    image_window = cv2.resize(image_window, (self.window, self.window), interpolation = cv2.INTER_LINEAR)
features = self.feature_extractor.extract_image_features(image_window, cpu_pool = cpu_pool)
features = self.scaler.transform(features.reshape(1, -1))
prediction = self.svc.predict(features)[0]
                window_scale = int(self.window * scale)
                top_left = (int(window_min_h * scale) + h_min, int(window_min_v * scale) + v_min)
bottom_right = (top_left[0] + window_scale, top_left[1] + window_scale)
windows.append((top_left, bottom_right))
predictions.append(prediction)
return windows, predictions
def _image_search(self, image, search_parameters, cpu_pool = None):
windows = []
predictions = []
for v_min, v_max, h_min, h_max, scale, cells_per_step in search_parameters:
image_region = image[v_min:v_max, h_min:h_max, :]
_windows, _predictions = self._image_region_search(image_region, v_min, h_min, scale, cells_per_step, cpu_pool = cpu_pool)
windows.append(_windows)
predictions.append(_predictions)
# Flatten lists
windows = [item for sublist in windows for item in sublist]
predictions = [item for sublist in predictions for item in sublist]
return windows, predictions
def _make_heatmap(self, windows, predictions):
        heatmap = np.zeros((self.n_rows, self.n_cols), dtype = float)
n_samples = len(windows)
for i in range(n_samples):
if predictions[i] == 1:
window = windows[i]
heatmap[window[0][1]:window[1][1], window[0][0]:window[1][0]] += HEAT_INCREMENT
return heatmap
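    # Illustrative sketch (not part of the original pipeline): how a thresholded heatmap
    # is turned into connected per-vehicle regions with scipy's `label`. The threshold
    # value used here is arbitrary and only for demonstration.
    @staticmethod
    def _demo_label_heatmap():
        demo_heatmap = np.zeros((10, 10), dtype=float)
        demo_heatmap[2:5, 2:5] = 3 * HEAT_INCREMENT      # overlapping detections -> hot region
        demo_heatmap[demo_heatmap < HEAT_INCREMENT] = 0  # hypothetical threshold
        labelled, n_regions = label(demo_heatmap)
        # `labelled` assigns one integer id per connected hot region; n_regions == 1 here.
        return labelled, n_regions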
def _bounding_boxes(self, heatmap, min_width, min_height):
labels = label(heatmap)
bounding_boxes = []
for car_n in range(1, labels[1] + 1):
tmp = (labels[0] == car_n).nonzero()
nonzero_x = np.array(tmp[1])
            nonzero_y = np.array(tmp[0])
import numpy as np
def QRDecomposition(A):
    n = np.shape(A)[0]  # number of rows of A
    m = np.shape(A)[1]  # number of columns of A
    Q = np.zeros((n,m))  # declare the matrix Q
    R = np.zeros((m,m))  # declare the matrix R
    for j in range(0, m):
        A_column = np.copy(A[:, j])  # take column j of A (copied so the input A is not modified in place)
        V = A_column  # V starts as column j of A
        for i in range(0, j):
            R[i,j] = Q[:,i].dot(A_column)  # R[i,j] = column i of Q dot the (partially orthogonalized) column j (i != j)
            V -= (Q[:,i].dot(A_column))*Q[:,i]  # remove the component of V along column i of Q
        R[j,j] = np.linalg.norm(V)  # R[j,j] = norm of the orthogonalized column (i == j)
        Q[:,j] = V/np.linalg.norm(V)  # normalize V and assign it to column j of Q
return Q, R
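# Illustrative sketch (not part of the original module): a quick numerical check of
# QRDecomposition on a random matrix, verifying that Q has orthonormal columns,
# that R is upper triangular, and that Q @ R rebuilds A.
def _check_qr_example():
    A = np.random.rand(5, 3)
    Q, R = QRDecomposition(A)
    assert np.allclose(Q.T @ Q, np.eye(3), atol=1e-10)  # columns of Q are orthonormal
    assert np.allclose(R, np.triu(R))                   # R is upper triangular
    assert np.allclose(Q @ R, A, atol=1e-10)            # A is reconstructed
    return Q, R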
def decompLU(A):
    L = np.eye(np.shape(A)[0])  # matrix L with the dimensions of A and ones on the main diagonal
    U = np.zeros(np.shape(A))  # matrix U with the dimensions of A
#!/usr/bin/env python
#_*_coding:utf-8_*_
import numpy as np
import math
def Calculate_Fscore(array, labels):
if len(array) != len(labels):
print('Error. inconsistent data shape with sample number')
return 0
array_po = []
array_ne = []
for i in range(len(labels)):
if labels[i] == 1:
array_po.append(array[i])
else:
array_ne.append(array[i])
mean_po = sum(array_po) / len(array_po)
mean_ne = sum(array_ne) / len(array_ne)
mean = sum(array) / len(array)
score_1 = ((mean_po - mean) ** 2 + (mean_ne - mean) ** 2)
score_2 = sum([(i-mean_po) ** 2 for i in array_po]) / (len(array_po) - 1)
score_3 = sum([(i-mean_ne) ** 2 for i in array_ne]) / (len(array_ne) - 1)
f_score = score_1 / (score_2 + score_3)
return f_score
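# Illustrative sketch (not part of the original script): Calculate_Fscore on a toy
# feature where the positive samples (label 1) clearly separate from the negatives,
# so the returned F-score is large; a non-discriminative feature would score near 0.
def _fscore_example():
    toy_values = [0.9, 0.8, 0.85, 0.1, 0.2, 0.15]
    toy_labels = [1, 1, 1, 0, 0, 0]
    return Calculate_Fscore(toy_values, toy_labels)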
def Fscore(encodings, labels):
features = encodings[0][1:]
    encodings = np.array(encodings)
#!/usr/bin/env python3
import os
import sys
import glob
import argparse
import logging
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.coordinates.name_resolve import NameResolveError
from astropy.time import Time
import astropy.constants as const
import astropy.units as u
import tqdm
from arts_tracking_beams import ARTSFITSReader, TrackingBeam
from arts_tracking_beams.tools import radec_to_hadec
from arts_tracking_beams.constants import NTAB, CB_HPBW, REF_FREQ, BMAX
def get_input_path(input_folder, taskid=None, cb=None):
"""
Get input file path
:param str input_folder: Folder containing FITS files
:param str taskid: observation taskid (required if more than one observation is present)
:param int cb: observation compound beam (required if more than one CB is present)
:return: file path formatted as <path>/ARTS<taskid>_CB<cb>_{tab:02d}.fits
"""
# construct glob pattern
pattern = f'{input_folder}/ARTS'
if taskid is not None:
pattern += f'{taskid}'
if cb is not None:
pattern += f'*CB{cb:02d}'
pattern += '*.fits'
# get list of files
files = glob.glob(pattern)
if not files:
logging.error("No input files found")
sys.exit(1)
# there should be one file per TAB
if not len(files) == NTAB:
logging.error(f'Expected {NTAB} files but found {len(files)}')
sys.exit(1)
# construct the file path with formatter for TAB index
# first sort so TAB00 file is first
pth = sorted(files)[0].replace('TAB00', 'TAB{tab:02d}')
logging.debug(f'Input FITS path: {pth}')
return pth
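# Illustrative sketch (the file name below is hypothetical, for demonstration only):
# the returned path keeps a '{tab:02d}' placeholder so callers can format the path
# for each tied-array beam, e.g. TAB 3 of a given taskid and compound beam.
def _example_tab_path():
    pth = '/data/ARTS200101001_CB00_TAB{tab:02d}.fits'
    return pth.format(tab=3)  # -> '/data/ARTS200101001_CB00_TAB03.fits'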
def get_source_coordinates(name=None, ra=None, dec=None):
"""
Get source coordinates
:param str name: source name (to resolve with SkyCoord.from_name)
:param str ra: Right Ascension (hh:mm:ss.s, ignored if name is given)
:param str dec: Declination (dd:mm:ss.s, ignored if name is given)
:return: source coordinates (SkyCoord)
"""
if name is not None:
try:
coord = SkyCoord.from_name(name)
except NameResolveError:
logging.error(f'Could not find coordinates for source {name}')
sys.exit(1)
else:
if ra is None or dec is None:
logging.error('RA and Dec must be specified if source name is not specified')
sys.exit(1)
coord = SkyCoord(ra, dec, unit=(u.hourangle, u.deg))
logging.debug(f'Source coordinates: {coord.to_string("hmsdms", precision=1)}')
return coord
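# Illustrative sketch (not part of the original module): constructing the same kind of
# SkyCoord directly from sexagesimal strings, which is what the ra/dec fallback above does.
def _example_manual_coord():
    coord = SkyCoord('05:34:31.9', '+22:00:52', unit=(u.hourangle, u.deg))
    return coord.to_string('hmsdms', precision=1)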
def required_tb_resolution(pointing, tstart, duration, fhi):
"""
Calculate the required time interval for calculating the TB TAB indices.
This is set by the time required to cross a single TAB at 15"/second Earth rotation,
at the point in the observation where the TABs are narrowest (i.e. HA closest to zero)
Worst case is roughly 1.1 second, which nicely fits the typical FITS subint length of 1.024s
:param SkyCoord pointing: CB pointing
:param Time tstart: Observation start time
:param Quantity duration: Observation duration
:param Quantity fhi: Highest frequency of observation
:return: TB time resolution (Quantity)
"""
# get HA, Dec at start and end of observation
ha_s, dec_s = radec_to_hadec(pointing.ra, pointing.dec, tstart)
ha_e, dec_e = radec_to_hadec(pointing.ra, pointing.dec, tstart + duration)
# if one HA is negative and the other positive, the minimum HA is zero
if (ha_s * ha_e).value < 0:
ha = 0 * u.deg
else:
# find value closest to zero, sign does not matter as projection effect is symmetric around HA=0
        ha = min(np.abs(ha_s), np.abs(ha_e))
# plotting
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
# numpy
import numpy as np
# scipy
import scipy as sp
import scipy.interpolate
from scipy.special import erfinv, erf
from scipy.stats import poisson as pss
import scipy.fftpack
import scipy.sparse
# jit
from numba import jit
import ctypes
import astropy
import astropy as ap
from astropy.convolution import convolve_fft, AiryDisk2DKernel
import pickle
# multiprocessing
import multiprocessing as mp
from copy import deepcopy
# utilities
import os, time, sys, glob, fnmatch, inspect, traceback, functools
# HealPix
import healpy as hp
# ignore warnings if not in diagnostic mode
import warnings
#seterr(divide='raise', over='raise', invalid='raise')
#seterr(all='raise')
#seterr(under='ignore')
#warnings.simplefilter('ignore')
#np.set_printoptions(linewidth=180)
#sns.set(context='poster', style='ticks', color_codes=True)
import h5py
# utilities
# secondaries
## Symbolic Jacobian calculation
#import sympy
# tdpy
import tdpy
from tdpy.util import summgene
# photometry related
### find the spectra of sources
def retr_spec(gdat, flux, sind=None, curv=None, expc=None, sindcolr=None, elin=None, edisintp=None, sigm=None, gamm=None, spectype='powr', plot=False):
if gdat.numbener == 1:
spec = flux[None, :]
else:
if plot:
meanener = gdat.meanpara.enerplot
else:
meanener = gdat.meanpara.ener
        if spectype == 'gaus':
            spec = 1. / edis[None, :] / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis[None, :])**2)
        if spectype == 'voig':
            args = (gdat.meanpara.ener[:, None] + 1j * gamm[None, :]) / np.sqrt(2.) / sigm[None, :]
            spec = 1. / sigm[None, :] / np.sqrt(2. * np.pi) * flux[None, :] * np.real(scipy.special.wofz(args))
        if spectype == 'edis':
            edis = edisintp(elin)[None, :]
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'pvoi':
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'lore':
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'powr':
            spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :])
        if spectype == 'colr':
if plot:
spec = np.zeros((gdat.numbenerplot, flux.size))
else:
spec = np.empty((gdat.numbener, flux.size))
for i in gdat.indxener:
if i < gdat.indxenerpivt:
spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i])
elif i == gdat.indxenerpivt:
spec[i, :] = flux
else:
spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i-1])
        if spectype == 'curv':
spec = flux[None, :] * meanener[:, None]**(-sind[None, :] - gdat.factlogtenerpivt[:, None] * curv[None, :])
        if spectype == 'expc':
spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :]) * np.exp(-(meanener - gdat.enerpivt)[:, None] / expc[None, :])
return spec
### find the surface brightness due to one point source
def retr_sbrtpnts(gdat, lgal, bgal, spec, psfnintp, indxpixlelem):
# calculate the distance to all pixels from each point source
dist = retr_angldistunit(gdat, lgal, bgal, indxpixlelem)
# interpolate the PSF onto the pixels
if gdat.kernevaltype == 'ulip':
psfntemp = psfnintp(dist)
if gdat.kernevaltype == 'bspx':
pass
# scale by the PS spectrum
sbrtpnts = spec[:, None, None] * psfntemp
return sbrtpnts
def retr_psfnwdth(gdat, psfn, frac):
'''
Return the PSF width
'''
wdth = np.zeros((gdat.numbener, gdat.numbevtt))
for i in gdat.indxener:
for m in gdat.indxevtt:
psfntemp = psfn[i, :, m]
indxanglgood = np.argsort(psfntemp)
intpwdth = max(frac * np.amax(psfntemp), np.amin(psfntemp))
if intpwdth >= np.amin(psfntemp[indxanglgood]) and intpwdth <= np.amax(psfntemp[indxanglgood]):
wdthtemp = sp.interpolate.interp1d(psfntemp[indxanglgood], gdat.binspara.angl[indxanglgood], fill_value='extrapolate')(intpwdth)
else:
wdthtemp = 0.
wdth[i, m] = wdthtemp
return wdth
# lensing-related
def samp_lgalbgalfromtmpl(gdat, probtmpl):
indxpixldraw = np.random.choice(gdat.indxpixl, p=probtmpl)
lgal = gdat.lgalgrid[indxpixldraw] + randn(gdat.sizepixl)
bgal = gdat.bgalgrid[indxpixldraw] + randn(gdat.sizepixl)
return lgal, bgal
## custom random variables, pdfs, cdfs and icdfs
### probability distribution functions
def retr_lprbpois(data, modl):
lprb = data * np.log(modl) - modl - sp.special.gammaln(data + 1)
return lprb
### probability density functions
def pdfn_self(xdat, minm, maxm):
pdfn = 1. / (maxm - minm)
return pdfn
def pdfn_expo(xdat, maxm, scal):
if (xdat > maxm).any():
pdfn = 0.
else:
pdfn = 1. / scal / (1. - np.exp(-maxm / scal)) * np.exp(-xdat / scal)
return pdfn
def pdfn_dexp(xdat, maxm, scal):
pdfn = 0.5 * pdfn_expo(np.fabs(xdat), maxm, scal)
return pdfn
def pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr):
if np.isscalar(xdat):
xdat = np.array([xdat])
faca = 1. / (brek**(sloplowr - slopuppr) * (brek**(1. - sloplowr) - minm**(1. - sloplowr)) / \
(1. - sloplowr) + (maxm**(1. - slopuppr) - brek**(1. - slopuppr)) / (1. - slopuppr))
facb = faca * brek**(sloplowr - slopuppr) / (1. - sloplowr)
pdfn = np.empty_like(xdat)
indxlowr = np.where(xdat <= brek)[0]
indxuppr = np.where(xdat > brek)[0]
if indxlowr.size > 0:
pdfn[indxlowr] = faca * brek**(sloplowr - slopuppr) * xdat[indxlowr]**(-sloplowr)
if indxuppr.size > 0:
pdfn[indxuppr] = faca * xdat[indxuppr]**(-slopuppr)
return pdfn
def pdfn_powr(xdat, minm, maxm, slop):
norm = (1. - slop) / (maxm**(1. - slop) - minm**(1. - slop))
pdfn = norm * xdat**(-slop)
return pdfn
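# Illustrative numerical check (not part of the original module): pdfn_powr should
# integrate to unity between minm and maxm for any slope other than 1; the values
# of minm, maxm and slop below are arbitrary demonstration inputs.
def _check_pdfn_powr_norm(minm=1., maxm=10., slop=1.9):
    xdat = np.linspace(minm, maxm, 100001)
    pdfn = pdfn_powr(xdat, minm, maxm, slop)
    # trapezoid-rule integral, expected to be ~1
    return np.sum(0.5 * (pdfn[1:] + pdfn[:-1]) * np.diff(xdat))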
def pdfn_logt(xdat, minm, maxm):
pdfn = 1. / (np.log(maxm) - np.log(minm)) / xdat
return pdfn
def pdfn_igam(xdat, slop, cutf):
pdfn = sp.stats.invgamma.pdf(xdat, slop - 1., scale=cutf)
return pdfn
def pdfn_lnor(xdat, mean, stdv):
pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)
return pdfn
def pdfn_gaus(xdat, mean, stdv):
    pdfn = 1. / np.sqrt(2. * np.pi) / stdv * np.exp(-0.5 * ((xdat - mean) / stdv)**2)
return pdfn
def pdfn_lgau(xdat, mean, stdv):
pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)
return pdfn
def pdfn_atan(para, minmpara, maxmpara):
pdfn = 1. / (para**2 + 1.) / (np.arctan(maxmpara) - np.arctan(minmpara))
return pdfn
def cdfn_paragenrscalbase(gdat, strgmodl, paragenrscalbase, thisindxparagenrbase):
gmod = getattr(gdat, strgmodl)
scalparagenrbase = gmod.scalpara.genrbase[thisindxparagenrbase]
if scalparagenrbase == 'self' or scalparagenrbase == 'logt' or scalparagenrbase == 'atan':
listminmparagenrscalbase = gmod.minmpara.genrbase[thisindxparagenrbase]
factparagenrscalbase = gmod.factparagenrscalbase[thisindxparagenrbase]
if scalparagenrbase == 'self':
paragenrscalbaseunit = cdfn_self(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
elif scalparagenrbase == 'logt':
paragenrscalbaseunit = cdfn_logt(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
elif scalparagenrbase == 'atan':
gmod.listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[thisindxparagenrbase]
paragenrscalbaseunit = cdfn_atan(paragenrscalbase, listminmparagenrscalbase, gmod.listmaxmparagenrscalbase)
elif scalparagenrbase == 'gaus' or scalparagenrbase == 'eerr':
gmod.listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[thisindxparagenrbase]
gmod.liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[thisindxparagenrbase]
if scalparagenrbase == 'eerr':
gmod.cdfnlistminmparagenrscalbaseunit = gmod.cdfnlistminmparagenrscalbaseunit[thisindxparagenrbase]
gmod.listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[thisindxparagenrbase]
paragenrscalbaseunit = cdfn_eerr(paragenrscalbase, gmod.listmeanparagenrscalbase, gmod.liststdvparagenrscalbase, \
gmod.cdfnlistminmparagenrscalbaseunit, gmod.listparagenrscalbaseunitdiff)
else:
paragenrscalbaseunit = cdfn_gaus(paragenrscalbase, gmod.listmeanparagenrscalbase, gmod.liststdvparagenrscalbase)
elif scalparagenrbase == 'pois':
paragenrscalbaseunit = paragenrscalbase
if gdat.booldiagmode:
if paragenrscalbaseunit == 0:
print('Warning. CDF is zero.')
return paragenrscalbaseunit
def icdf_paragenrscalfull(gdat, strgmodl, paragenrunitfull, indxparagenrfullelem):
gmod = getattr(gdat, strgmodl)
# tobechanged
# temp -- change zeros to empty
paragenrscalfull = np.zeros_like(paragenrunitfull)
for scaltype in gdat.listscaltype:
listindxparagenrbasescal = gmod.listindxparagenrbasescal[scaltype]
if len(listindxparagenrbasescal) == 0:
continue
paragenrscalfull[listindxparagenrbasescal] = icdf_paragenrscalbase(gdat, strgmodl, paragenrunitfull[listindxparagenrbasescal], scaltype, listindxparagenrbasescal)
if not np.isfinite(paragenrscalfull).all():
raise Exception('')
if indxparagenrfullelem is not None:
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
indxparagenrfulltemp = indxparagenrfullelem[l][gmod.namepara.genrelem[l][g]]
if indxparagenrfulltemp.size == 0:
continue
paragenrscalfull[indxparagenrfulltemp] = icdf_trap(gdat, strgmodl, paragenrunitfull[indxparagenrfulltemp], paragenrscalfull, \
gmod.listscalparagenrelem[l][g], gmod.namepara.genrelem[l][g], l)
if gdat.booldiagmode:
if not np.isfinite(paragenrscalfull[indxparagenrfulltemp]).all():
raise Exception('')
if not np.isfinite(paragenrscalfull).all():
raise Exception('')
return paragenrscalfull
def icdf_paragenrscalbase(gdat, strgmodl, paragenrunitbase, scaltype, indxparagenrbasescal):
gmod = getattr(gdat, strgmodl)
if scaltype == 'self' or scaltype == 'logt' or scaltype == 'atan':
minmparagenrscalbase = gmod.minmpara.genrbase[indxparagenrbasescal]
factparagenrscalbase = gmod.factpara.genrbase[indxparagenrbasescal]
if scaltype == 'self':
paragenrscalbase = tdpy.icdf_self(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
elif scaltype == 'logt':
paragenrscalbase = tdpy.icdf_logt(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
elif scaltype == 'atan':
listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[indxparagenrbasescal]
paragenrscalbase = tdpy.icdf_atan(paragenrunitbase, minmparagenrscalbase, listmaxmparagenrscalbase)
elif scaltype == 'gaus' or scaltype == 'eerr':
listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[indxparagenrbasescal]
liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[indxparagenrbasescal]
if scaltype == 'eerr':
cdfnminmparagenrscalbaseunit = gmod.cdfnminmparagenrscalbaseunit[indxparagenrbasescal]
listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[indxparagenrbasescal]
paragenrscalbase = tdpy.icdf_eerr(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase, cdfnminmparagenrscalbaseunit, listparagenrscalbaseunitdiff)
else:
paragenrscalbase = tdpy.icdf_gaus(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase)
elif scaltype == 'pois':
paragenrscalbase = paragenrunitbase
if gdat.booldiagmode:
if not np.isfinite(paragenrscalbase).all():
print('scaltype')
print(scaltype)
print('paragenrscalbase')
print(paragenrscalbase)
print('type(paragenrscalbase)')
print(type(paragenrscalbase))
print('paragenrscalbase.dtype')
print(paragenrscalbase.dtype)
raise Exception('')
return paragenrscalbase
def icdf_trap(gdat, strgmodl, cdfn, paragenrscalfull, scalcomp, nameparagenrelem, l):
gmod = getattr(gdat, strgmodl)
if scalcomp == 'self' or scalcomp == 'powr' or scalcomp == 'dpowslopbrek' or scalcomp == 'logt':
minm = getattr(gmod.minmpara, nameparagenrelem)
if scalcomp != 'self':
maxm = getattr(gmod.maxmpara, nameparagenrelem)
if scalcomp == 'powr':
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio%spop%d' % (nameparagenrelem, l))]
if gdat.booldiagmode:
if not np.isfinite(slop):
raise Exception('')
if maxm < minm:
raise Exception('')
icdf = tdpy.icdf_powr(cdfn, minm, maxm, slop)
if scalcomp == 'dpowslopbrek':
distbrek = paragenrscalfull[getattr(gmod.indxpara, 'brekprio' + nameparagenrelem)[l]]
sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + nameparagenrelem)[l]]
slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + nameparagenrelem)[l]]
icdf = tdpy.icdf_dpow(cdfn, minm, maxm, distbrek, sloplowr, slopuppr)
if scalcomp == 'expo':
sexp = getattr(gmod, nameparagenrelem + 'distsexppop%d' % l)
icdf = tdpy.icdf_expo(cdfn, maxm, sexp)
if scalcomp == 'self':
fact = getattr(gmod.factpara, nameparagenrelem)
icdf = tdpy.icdf_self_fact(cdfn, minm, fact)
if scalcomp == 'logt':
icdf = tdpy.icdf_logt(cdfn, minm, fact)
if scalcomp == 'dexp':
scal = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]
icdf = tdpy.icdf_dexp(cdfn, maxm, scal)
if scalcomp == 'lnormeanstdv':
distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
icdf = tdpy.icdf_lnor(cdfn, distmean, diststdv)
if scalcomp == 'igam':
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem)[l]]
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
icdf = tdpy.icdf_igam(cdfn, slop, cutf)
if scalcomp == 'gaus':
distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
icdf = tdpy.icdf_gaus(cdfn, distmean, diststdv)
if gdat.booldiagmode:
if not np.isfinite(icdf).all():
print('icdf')
print(icdf)
raise Exception('')
return icdf
def cdfn_trap(gdat, gdatmodi, strgmodl, icdf, indxpoplthis):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmod.listscalparagenrelem = gmod.listscalparagenrelem[indxpoplthis]
cdfn = np.empty_like(icdf)
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[indxpoplthis]):
if gmod.listscalparagenrelem[k] == 'self' or gmod.listscalparagenrelem[k] == 'dexp' or gmod.listscalparagenrelem[k] == 'expo' \
or gmod.listscalparagenrelem[k] == 'powr' or gmod.listscalparagenrelem[k] == 'dpowslopbrek':
minm = getattr(gdat.fitt.minm, nameparagenrelem)
if gmod.listscalparagenrelem[k] == 'powr':
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
slop = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
cdfn[k] = cdfn_powr(icdf[k], minm, maxm, slop)
elif gmod.listscalparagenrelem[k] == 'dpowslopbrek':
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
brek = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[indxpoplthis]]
sloplowr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[indxpoplthis]]
slopuppr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[indxpoplthis]]
cdfn[k] = cdfn_dpow(icdf[k], minm, maxm, brek, sloplowr, slopuppr)
else:
fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
cdfn[k] = cdfn_self(icdf[k], minm, fact)
if gmod.listscalparagenrelem[k] == 'lnormeanstdv':
distmean = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
diststdv = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
            cdfn[k] = cdfn_lnor(icdf[k], distmean, diststdv)
if gmod.listscalparagenrelem[k] == 'igam':
slop = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
cdfn[k] = cdfn_igam(icdf[k], slop, cutf)
if gmod.listscalparagenrelem[k] == 'gaus':
distmean = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
diststdv = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
cdfn[k] = cdfn_gaus(icdf[k], distmean, diststdv)
return cdfn
### update sampler state
def updt_stat(gdat, gdatmodi):
if gdat.typeverb > 1:
print('updt_stat()')
# update the sample and the unit sample vectors
gdatmodi.this.lpritotl = gdatmodi.next.lpritotl
gdatmodi.this.lliktotl = gdatmodi.next.lliktotl
gdatmodi.this.lpostotl = gdatmodi.next.lpostotl
gdatmodi.this.paragenrscalfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrscalfull[gdatmodi.indxsampmodi])
gdatmodi.this.paragenrunitfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrunitfull[gdatmodi.indxsampmodi])
if gdatmodi.this.indxproptype > 0:
gdatmodi.this.indxelemfull = deepcopy(gdatmodi.next.indxelemfull)
gdatmodi.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdatmodi.this.indxelemfull, 'fitt')
def initcompfromstat(gdat, gdatmodi, namerefr):
for l in gmod.indxpopl:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
minm = getattr(gdat.fitt.minmpara, nameparagenrelem)
maxm = getattr(gdat.fitt.maxmpara, nameparagenrelem)
try:
comp = getattr(gdat, namerefr + nameparagenrelem)[l][0, :]
if gmod.listscalparagenrelem[l][g] == 'self' or gmod.listscalparagenrelem[l][g] == 'logt':
fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
if gmod.listscalparagenrelem[l][g] == 'self':
compunit = cdfn_self(comp, minm, fact)
if gmod.listscalparagenrelem[l][g] == 'logt':
compunit = cdfn_logt(comp, minm, fact)
if gmod.listscalparagenrelem[l][g] == 'expo':
scal = getattr(gdat.fitt, 'gangdistsexp')
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
compunit = cdfn_expo(icdf, maxm, scal)
if gmod.listscalparagenrelem[l][g] == 'powr' or gmod.listscalparagenrelem[l][g] == 'igam':
slop = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[l]]
if gmod.listscalparagenrelem[l][g] == 'powr':
compunit = cdfn_powr(comp, minm, maxm, slop)
if gmod.listscalparagenrelem[l][g] == 'igam':
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
compunit = cdfn_igam(comp, slop, cutf)
if gmod.listscalparagenrelem[l][g] == 'dpowslopbrek':
brek = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[l]]
sloplowr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[l]]
slopuppr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[l]]
                    compunit = cdfn_dpow(comp, minm, maxm, brek, sloplowr, slopuppr)
if gmod.listscalparagenrelem[l][g] == 'gaus':
distmean = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[l]]
diststdv = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[l]]
compunit = cdfn_gaus(comp, distmean, diststdv)
except:
if gdat.typeverb > 0:
print('Initialization from the reference catalog failed for %s. Sampling randomly...' % nameparagenrelem)
compunit = np.random.rand(gdatmodi.this.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int))
gdatmodi.this.paragenrunitfull[gdatmodi.this.indxparagenrfullelem[l][nameparagenrelem]] = compunit
### find the set of pixels in proximity to a position on the map
def retr_indxpixlelemconc(gdat, strgmodl, dictelem, l):
gmod = getattr(gdat, strgmodl)
lgal = dictelem[l]['lgal']
bgal = dictelem[l]['bgal']
varbampl = dictelem[l][gmod.nameparagenrelemampl[l]]
if gmod.typeelemspateval[l] == 'locl':
listindxpixlelem = [[] for k in range(lgal.size)]
for k in range(lgal.size):
indxpixlpnts = retr_indxpixl(gdat, bgal[k], lgal[k])
indxfluxproxtemp = np.digitize(varbampl[k], gdat.binspara.prox)
if indxfluxproxtemp > 0:
indxfluxproxtemp -= 1
if indxfluxproxtemp == gdat.binspara.prox.size - 1:
print('Warning! Index of the proximity pixel list overflew. Taking the largest list...')
indxfluxproxtemp -= 1
indxpixlelem = gdat.indxpixlprox[indxfluxproxtemp][indxpixlpnts]
if isinstance(indxpixlelem, int):
indxpixlelem = gdat.indxpixl
listindxpixlelem[k] = indxpixlelem
listindxpixlelemconc = np.unique(np.concatenate(listindxpixlelem))
else:
listindxpixlelemconc = gdat.indxpixl
listindxpixlelem = gdat.indxpixl
return listindxpixlelem, listindxpixlelemconc
### find the distance between two points on the map
def retr_angldistunit(gdat, lgal, bgal, indxpixlelem, retranglcosi=False):
if gdat.typepixl == 'heal':
xdat, ydat, zaxi = retr_unit(lgal, bgal)
anglcosi = gdat.xdatgrid[indxpixlelem] * xdat + gdat.ydatgrid[indxpixlelem] * ydat + gdat.zaxigrid[indxpixlelem] * zaxi
if retranglcosi:
return anglcosi
else:
angldist = np.arccos(anglcosi)
return angldist
else:
angldist = np.sqrt((lgal - gdat.lgalgrid[indxpixlelem])**2 + (bgal - gdat.bgalgrid[indxpixlelem])**2)
return angldist
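# Illustrative sketch (standalone, not used by the pipeline): the 'heal' branch above
# measures separations as great-circle angles via the dot product of unit vectors,
# while the Cartesian branch uses the flat-sky Euclidean distance; for the small
# offsets typical of an ROI the two agree closely. The input angles are arbitrary.
def _demo_angular_vs_flat(lgal1=0.01, bgal1=0.02, lgal2=0.012, bgal2=0.019):
    vec1 = np.array([np.cos(bgal1) * np.cos(lgal1), np.cos(bgal1) * np.sin(lgal1), np.sin(bgal1)])
    vec2 = np.array([np.cos(bgal2) * np.cos(lgal2), np.cos(bgal2) * np.sin(lgal2), np.sin(bgal2)])
    angl = np.arccos(np.clip(np.dot(vec1, vec2), -1., 1.))   # great-circle separation [rad]
    flat = np.sqrt((lgal1 - lgal2)**2 + (bgal1 - bgal2)**2)  # flat-sky approximation [rad]
    return angl, flat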
### find the pixel index of a point on the map
def retr_indxpixl(gdat, bgal, lgal):
if gdat.typepixl == 'heal':
indxpixl = gdat.pixlcnvt[hp.ang2pix(gdat.numbsideheal, np.pi / 2. - bgal, lgal)]
if gdat.booldiagmode:
if (indxpixl == -1).any():
raise Exception('pixlcnvt went negative!')
if gdat.typepixl == 'cart':
indxlgcr = np.floor(gdat.numbsidecart * (lgal - gdat.minmlgaldata) / 2. / gdat.maxmgangdata).astype(int)
indxbgcr = np.floor(gdat.numbsidecart * (bgal - gdat.minmbgaldata) / 2. / gdat.maxmgangdata).astype(int)
if np.isscalar(indxlgcr):
if indxlgcr < 0:
indxlgcr = 0
if indxlgcr >= gdat.numbsidecart:
indxlgcr = gdat.numbsidecart - 1
else:
indxlgcr[np.where(indxlgcr < 0)] = 0
indxlgcr[np.where(indxlgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1
if np.isscalar(indxbgcr):
if indxbgcr < 0:
indxbgcr = 0
if indxbgcr >= gdat.numbsidecart:
indxbgcr = gdat.numbsidecart - 1
else:
indxbgcr[np.where(indxbgcr < 0)] = 0
indxbgcr[np.where(indxbgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1
indxpixl = indxlgcr * gdat.numbsidecart + indxbgcr
# convert to an index of non-zero exposure pixels
#indxpixl = gdat.indxpixlroficnvt[indxpixl]
return indxpixl
## obtain count maps
def retr_cntp(gdat, sbrt):
cntp = sbrt * gdat.expo * gdat.apix
if gdat.enerdiff:
cntp *= gdat.deltener[:, None, None]
return cntp
## plotting
### construct path for plots
def retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, strgplot, nameinte=''):
if strgmodl == 'true' or strgstat == '':
path = gdat.pathinit + nameinte + strgplot + '.pdf'
elif strgstat == 'pdfn' or strgstat == 'mlik':
path = gdat.pathplotrtag + strgpdfn + '/finl/' + nameinte + strgstat + strgplot + '.pdf'
elif strgstat == 'this':
path = gdat.pathplotrtag + strgpdfn + '/fram/' + nameinte + strgstat + strgplot + '_swep%09d.pdf' % gdatmodi.cntrswep
return path
### determine the marker size
def retr_mrkrsize(gdat, strgmodl, compampl, nameparagenrelemampl):
gmod = getattr(gdat, strgmodl)
minm = getattr(gdat.minmpara, nameparagenrelemampl)
maxm = getattr(gdat.maxmpara, nameparagenrelemampl)
mrkrsize = (np.sqrt(compampl) - np.sqrt(minm)) / (np.sqrt(maxm) - np.sqrt(minm)) * (gdat.maxmmrkrsize - gdat.minmmrkrsize) + gdat.minmmrkrsize
return mrkrsize
## experiment specific
def retr_psfphubb(gmod):
# temp
gmod.psfpexpr = np.array([0.080, 0.087]) / gdat.anglfact
def retr_psfpchan(gmod):
# temp
#gmod.psfpexpr = np.array([0.25, 0.3, 0.4, 0.6, 0.7]) / gdat.anglfact
if gdat.numbenerfull == 5:
gmod.psfpexpr = np.array([0.424 / gdat.anglfact, 2.75, 0.424 / gdat.anglfact, 2.59, 0.440 / gdat.anglfact, 2.47, 0.457 / gdat.anglfact, 2.45, 0.529 / gdat.anglfact, 3.72])
if gdat.numbenerfull == 2:
gmod.psfpexpr = np.array([0.427 / gdat.anglfact, 2.57, 0.449 / gdat.anglfact, 2.49])
#gdat.psfpchan = gmod.psfpexpr[(2 * gdat.indxenerincl[:, None] + np.arange(2)[None, :]).flatten()]
#gmod.psfpexpr = np.array([0.25 / gdat.anglfact,
# 0.30 / gdat.anglfacti\
# 0.40 / gdat.anglfacti\
# 0.60 / gdat.anglfacti\
# 0.70 / gdat.anglfacti
#gmod.psfpexpr = np.array([0.35 / gdat.anglfact, 2e-1, 1.9, 0.5 / gdat.anglfact, 1.e-1, 2.])
#gmod.psfpexpr = np.array([0.25 / gdat.anglfact, 2.0e-1, 1.9, \
# 0.30 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.40 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.60 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.70 / gdat.anglfact, 1.0e-1, 2.0])
def retr_psfpsdyn(gmod):
gmod.psfpexpr = np.array([0.05])
def retr_psfpferm(gmod):
if gdat.anlytype.startswith('rec8'):
path = gdat.pathdata + 'expr/irfn/psf_P8R2_SOURCE_V6_PSF.fits'
else:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
irfn = astropy.io.fits.getdata(path, 1)
minmener = irfn['energ_lo'].squeeze() * 1e-3 # [GeV]
maxmener = irfn['energ_hi'].squeeze() * 1e-3 # [GeV]
enerirfn = np.sqrt(minmener * maxmener)
numbpsfpscal = 3
numbpsfpform = 5
fermscal = np.zeros((gdat.numbevtt, numbpsfpscal))
fermform = np.zeros((gdat.numbener, gdat.numbevtt, numbpsfpform))
strgpara = ['score', 'gcore', 'stail', 'gtail', 'ntail']
for m in gdat.indxevtt:
if gdat.anlytype.startswith('rec8'):
irfn = astropy.io.fits.getdata(path, 1 + 3 * gdat.indxevttincl[m])
fermscal[m, :] = astropy.io.fits.getdata(path, 2 + 3 * gdat.indxevttincl[m])['PSFSCALE']
else:
if m == 1:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_front.fits'
elif m == 0:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
else:
continue
irfn = astropy.io.fits.getdata(path, 1)
fermscal[m, :] = astropy.io.fits.getdata(path, 2)['PSFSCALE']
for k in range(numbpsfpform):
fermform[:, m, k] = sp.interpolate.interp1d(enerirfn, np.mean(irfn[strgpara[k]].squeeze(), axis=0), fill_value='extrapolate')(gdat.meanpara.ener)
# convert N_tail to f_core
for m in gdat.indxevtt:
for i in gdat.indxener:
fermform[i, m, 4] = 1. / (1. + fermform[i, m, 4] * fermform[i, m, 2]**2 / fermform[i, m, 0]**2)
# calculate the scale factor
gdat.fermscalfact = np.sqrt((fermscal[None, :, 0] * (10. * gdat.meanpara.ener[:, None])**fermscal[None, :, 2])**2 + fermscal[None, :, 1]**2)
# store the fermi PSF parameters
gmod.psfpexpr = np.zeros(gdat.numbener * gdat.numbevtt * numbpsfpform)
for m in gdat.indxevtt:
for k in range(numbpsfpform):
indxfermpsfptemp = m * numbpsfpform * gdat.numbener + gdat.indxener * numbpsfpform + k
gmod.psfpexpr[indxfermpsfptemp] = fermform[:, m, k]
def retr_refrchaninit(gdat):
gdat.indxrefr = np.arange(gdat.numbrefr)
gdat.dictrefr = []
for q in gdat.indxrefr:
gdat.dictrefr.append(dict())
gdat.refr.namepara.elemsign = ['flux', 'magt']
gdat.refr.lablelem = ['Xue+2011', 'Wolf+2008']
gdat.listnamerefr += ['xu11', 'wo08']
setattr(gdat, 'plotminmotyp', 0.)
    setattr(gdat, 'plotmaxmotyp', 1.)
setattr(gmod.lablrootpara, 'otyp', 'O')
setattr(gdat, 'scalotypplot', 'self')
setattr(gmod.lablrootpara, 'otypxu11', 'O')
for name in gdat.listnamerefr:
setattr(gdat, 'plotminmotyp' + name, 0.)
setattr(gdat, 'plotmaxmotyp' + name, 1.)
if gdat.strgcnfg == 'pcat_chan_inpt_home4msc':
with open(gdat.pathinpt + 'ECDFS_Cross_ID_Hsu2014.txt', 'r') as thisfile:
for k, line in enumerate(thisfile):
if k < 18:
continue
rasccand =line[2]
declcand =line[2]
gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'otyp', 'lumi']
gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'magt', 'reds', 'otyp']
def retr_refrchanfinl(gdat):
booltemp = False
if gdat.anlytype.startswith('extr'):
if gdat.numbsidecart == 300:
gdat.numbpixllgalshft[0] = 1490
gdat.numbpixlbgalshft[0] = 1430
else:
booltemp = True
elif gdat.anlytype.startswith('home'):
gdat.numbpixllgalshft[0] = 0
gdat.numbpixlbgalshft[0] = 0
if gdat.numbsidecart == 600:
pass
elif gdat.numbsidecart == 100:
indxtile = int(gdat.anlytype[-4:])
numbsidecntr = int(gdat.anlytype[8:12])
numbtileside = numbsidecntr / gdat.numbsidecart
indxtilexaxi = indxtile // numbtileside
indxtileyaxi = indxtile % numbtileside
gdat.numbpixllgalshft[0] += indxtilexaxi * gdat.numbsidecart
gdat.numbpixlbgalshft[0] += indxtileyaxi * gdat.numbsidecart
elif gdat.numbsidecart == 300:
gdat.numbpixllgalshft[0] += 150
gdat.numbpixlbgalshft[0] += 150
else:
booltemp = True
else:
booltemp = True
if booltemp:
raise Exception('Reference elements cannot be aligned with the spatial axes!')
## WCS object for rotating reference elements into the ROI
if gdat.numbener == 2:
gdat.listpathwcss[0] = gdat.pathinpt + 'CDFS-4Ms-0p5to2-asca-im-bin1.fits'
else:
gdat.listpathwcss[0] = gdat.pathinpt + '0.5-0.91028_flux_%sMs.img' % gdat.anlytype[4]
# Xue et al. (2011)
#with open(gdat.pathinpt + 'chancatl.txt', 'r') as thisfile:
pathfile = gdat.pathinpt + 'Xue2011.fits'
    hdun = astropy.io.fits.open(pathfile)
hdun.info()
    lgalchan = hdun[1].data['_Glon'] / 180. * np.pi
    bgalchan = hdun[1].data['_Glat'] / 180. * np.pi
fluxchansoft = hdun[1].data['SFlux']
fluxchanhard = hdun[1].data['HFlux']
objttypechan = hdun[1].data['Otype']
gdat.refrlumi[0][0] = hdun[1].data['Lx']
# position
gdat.refr.dictelem[0]['lgal'] = lgalchan
gdat.refr.dictelem[0]['bgal'] = bgalchan
# spectra
gdat.refrspec = [[np.zeros((3, gdat.numbener, lgalchan.size))]]
if gdat.numbener == 2:
gdat.refrspec[0][0, 0, :] = fluxchansoft * 0.624e9
gdat.refrspec[0][0, 1, :] = fluxchanhard * 0.624e9 / 16.
else:
gdat.refrspec[0][0, :, :] = 2. * fluxchansoft[None, :] * 0.624e9
gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :]
gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :]
# fluxes
gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
# spectral indices
if gdat.numbener > 1:
gdat.refrsind[0] = -np.log(gdat.refrspec[0][0, 1, :] / gdat.refrspec[0][0, 0, :]) / np.log(np.sqrt(7. / 2.) / np.sqrt(0.5 * 2.))
## object type
objttypechantemp = np.zeros(lgalchan.size) - 1.
indx = np.where(objttypechan == 'AGN')[0]
objttypechantemp[indx] = 0.165
indx = np.where(objttypechan == 'Galaxy')[0]
objttypechantemp[indx] = 0.495
indx = np.where(objttypechan == 'Star')[0]
objttypechantemp[indx] = 0.835
gdat.refrotyp[0][0] = objttypechantemp
# Wolf et al. (2011)
path = gdat.pathdata + 'inpt/Wolf2008.fits'
data = astropy.io.fits.getdata(path)
gdat.refrlgal[1] = np.deg2rad(data['_Glon'])
    gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi
gdat.refrbgal[1] = np.deg2rad(data['_Glat'])
gdat.refrmagt[1][0] = data['Rmag']
gdat.refrreds[1][0] = data['MCz']
#listname = []
#for k in range(data['MCclass'].size):
# if not data['MCclass'][k] in listname:
# listname.append(data['MCclass'][k])
listname = ['Galaxy', 'Galaxy (Uncl!)', 'QSO (Gal?)', 'Galaxy (Star?)', 'Star', 'Strange Object', 'QSO', 'WDwarf']
gdat.refrotyp[1][0] = np.zeros_like(gdat.refrreds[1][0]) - 1.
for k, name in enumerate(listname):
indx = np.where(data['MCclass'] == name)[0]
gdat.refrotyp[1][0][indx] = k / 10.
# error budget
for name in ['lgal', 'bgal', 'sind', 'otyp', 'lumi', 'magt', 'reds']:
refrtile = [[] for q in gdat.indxrefr]
refrfeat = getattr(gdat.refr, name)
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrtile[q] = np.tile(refrfeat[q], (3, 1))
setattr(gdat.refr, name, refrtile)
def retr_refrferminit(gdat):
gdat.listnamerefr += ['ac15', 'ma05']
gdat.indxrefr = np.arange(gdat.numbrefr)
gdat.refr.lablelem = ['Acero+2015', 'Manchester+2005']
gdat.refr.namepara.elemsign = ['flux', 'flux0400']
setattr(gmod.lablrootpara, 'curvac15', '%s_{3FGL}' % gdat.lablcurv)
setattr(gmod.lablrootpara, 'expcac15', 'E_{c,3FGL}')
for name in gdat.listnamerefr:
setattr(gdat.minmpara, 'curv' + name, -1.)
setattr(gdat.maxmpara, 'curv' + name, 1.)
setattr(gdat.minmpara, 'expc' + name, 0.1)
setattr(gdat.maxmpara, 'expc' + name, 10.)
gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'curv', 'expc', 'tvar', 'etag', 'styp', 'sindcolr0001', 'sindcolr0002']
gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'flux0400', 'per0', 'per1']
def retr_refrfermfinl(gdat):
gdat.minmstyp = -0.5
gdat.maxmstyp = 3.5
gdat.lablstyp = 'S'
gmod.scalstypplot = 'self'
gdat.minmtvar = 0.
gdat.maxmtvar = 400.
gdat.labltvar = 'T'
gmod.scaltvarplot = 'logt'
# Acero+2015
path = gdat.pathdata + 'expr/pnts/gll_psc_v16.fit'
fgl3 = astropy.io.fits.getdata(path)
gdat.refr.dictelem[0]['lgal'] = np.deg2rad(fgl3['glon'])
gdat.refr.dictelem[0]['lgal'] = np.pi - ((gdat.refr.dictelem[0]['lgal'] - np.pi) % (2. * np.pi))
gdat.refr.dictelem[0]['bgal'] = np.deg2rad(fgl3['glat'])
gdat.refr.numbelemfull = gdat.refr.dictelem[0]['lgal'].size
gdat.refrspec = [np.empty((3, gdat.numbener, gdat.refr.dictelem[0]['lgal'].size))]
gdat.refrspec[0][0, :, :] = np.stack((fgl3['Flux300_1000'], fgl3['Flux1000_3000'], fgl3['Flux3000_10000']))[gdat.indxenerincl, :] / gdat.deltener[:, None]
fgl3specstdvtemp = np.stack((fgl3['Unc_Flux100_300'], fgl3['Unc_Flux300_1000'], fgl3['Unc_Flux1000_3000'], fgl3['Unc_Flux3000_10000'], \
fgl3['Unc_Flux10000_100000']))[gdat.indxenerincl, :, :] / gdat.deltener[:, None, None]
gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 0]
gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 1]
gdat.refrspec[0][np.where(np.isfinite(gdat.refrspec[0]) == False)] = 0.
gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
gdat.refrsindcolr0001[0] = -np.log(gdat.refrspec[0][:, 1, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[1] / gdat.enerpivt)
gdat.refrsindcolr0002[0] = -np.log(gdat.refrspec[0][:, 2, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[2] / gdat.enerpivt)
fgl3axisstdv = (fgl3['Conf_68_SemiMinor'] + fgl3['Conf_68_SemiMajor']) * 0.5
fgl3anglstdv = np.deg2rad(fgl3['Conf_68_PosAng']) # [rad]
fgl3lgalstdv = fgl3axisstdv * abs(np.cos(fgl3anglstdv))
fgl3bgalstdv = fgl3axisstdv * abs(np.sin(fgl3anglstdv))
gdat.refretag[0] = np.zeros(gdat.refr.dictelem[0]['lgal'].size, dtype=object)
for k in range(gdat.refr.dictelem[0]['lgal'].size):
gdat.refretag[0][k] = '%s, %s, %s' % (fgl3['Source_Name'][k], fgl3['CLASS1'][k], fgl3['ASSOC1'][k])
gdat.refrtvar[0] = fgl3['Variability_Index']
gdat.refrstyp[0] = np.zeros_like(gdat.refr.dictelem[0]['lgal']) - 1
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PowerLaw ')] = 0
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'LogParabola ')] = 1
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLExpCutoff ')] = 2
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLSuperExpCutoff')] = 3
indx = np.where(gdat.refrstyp[0] == -1)[0]
if indx.size > 0:
raise Exception('')
gdat.refrsind[0] = fgl3['Spectral_Index']
gdat.refrcurv[0] = fgl3['beta']
gdat.refrexpc[0] = fgl3['Cutoff'] * 1e-3
gdat.refrcurv[0][np.where(np.logical_not(np.isfinite(gdat.refrcurv[0])))] = -10.
gdat.refrexpc[0][np.where(np.logical_not(np.isfinite(gdat.refrexpc[0])))] = 0.
gdat.refrsind[0] = np.tile(gdat.refrsind[0], (3, 1))
gdat.refrcurv[0] = np.tile(gdat.refrcurv[0], (3, 1))
gdat.refrexpc[0] = np.tile(gdat.refrexpc[0], (3, 1))
# Manchester+2005
path = gdat.pathdata + 'inpt/Manchester2005.fits'
data = astropy.io.fits.getdata(path)
gdat.refrlgal[1] = np.deg2rad(data['glon'])
gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi
gdat.refrbgal[1] = np.deg2rad(data['glat'])
gdat.refrper0[1] = data['P0']
gdat.refrper1[1] = data['P1']
gdat.refrflux0400[1] = data['S400']
#gdat.refrdism[1] = data['DM']
#gdat.refrdlos[1] = data['Dist']
# error budget
for name in ['lgal', 'bgal', 'per0', 'per1', 'flux0400', 'tvar', 'styp']:
refrtile = [[] for q in gdat.indxrefr]
refrfeat = getattr(gdat.refr, name)
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrtile[q] = np.tile(refrfeat[q], (3, 1))
setattr(gdat.refr, name, refrtile)
def retr_singgaus(scaldevi, sigc):
psfn = 1. / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2)
return psfn
def retr_singking(scaldevi, sigc, gamc):
psfn = 1. / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc)
return psfn
def retr_doubgaus(scaldevi, frac, sigc, sigt):
    psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigt**2 * np.exp(-0.5 * scaldevi**2 / sigt**2)
return psfn
def retr_gausking(scaldevi, frac, sigc, sigt, gamt):
psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
return psfn
def retr_doubking(scaldevi, frac, sigc, gamc, sigt, gamt):
psfn = frac / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc) + \
(1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
return psfn
def retr_lgalbgal(gang, aang):
lgal = gang * np.cos(aang)
bgal = gang * np.sin(aang)
return lgal, bgal
def retr_gang(lgal, bgal):
gang = np.arccos(np.cos(lgal) * np.cos(bgal))
return gang
def retr_aang(lgal, bgal):
aang = np.arctan2(bgal, lgal)
return aang
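# Illustrative sketch (not part of the original module): retr_gang/retr_aang convert a
# longitude/latitude pair into an angular distance from the origin and a position angle,
# and retr_lgalbgal approximately inverts that mapping for the small angles used in an
# ROI, so a round trip should recover the inputs to good accuracy. Inputs are arbitrary.
def _demo_gang_aang_roundtrip(lgal=0.02, bgal=-0.01):
    gang = retr_gang(lgal, bgal)
    aang = retr_aang(lgal, bgal)
    lgal_back, bgal_back = retr_lgalbgal(gang, aang)
    return np.allclose([lgal, bgal], [lgal_back, bgal_back], atol=1e-5)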
def show_paragenrscalfull(gdat, gdatmodi, strgstat='this', strgmodl='fitt', indxsampshow=None):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodstat = getattr(gdatobjt, strgstat)
print('strgmodl: ' + strgmodl)
print('strgstat: ' + strgstat)
print('%5s %20s %30s %30s %15s' % ('index', 'namepara', 'paragenrunitfull', 'paragenrscalfull', 'scalpara'))
for k in gmod.indxparagenrfull:
if indxsampshow is not None and not k in indxsampshow:
continue
if gmod.numbparaelem > 0:
booltemp = False
for l in gmod.indxpopl:
if k == gmod.indxparagenrelemsing[l][0]:
booltemp = True
if booltemp:
print('')
print('%5d %20s %30g %30g %15s' % (k, gmod.namepara.genrfull[k], gmodstat.paragenrunitfull[k], gmodstat.paragenrscalfull[k], gmod.scalpara.genrfull[k]))
def prop_stat(gdat, gdatmodi, strgmodl, thisindxelem=None, thisindxpopl=None, brth=False, deth=False):
if gdat.typeverb > 1:
print('prop_stat()')
#indxproptype
# within, birth, death, split, merge
# 0, 1, 2, 3, 4
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodthis = getattr(gdatobjt, 'this')
gmodnext = getattr(gdatobjt, 'next')
if gmod.numbparaelem > 0:
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(gmodthis.indxelemfull[l]) > len(set(gmodthis.indxelemfull[l])):
raise Exception('Repeating entry in the element index list!')
thisindxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodthis.indxelemfull, strgmodl)
setattr(gmodthis, 'indxparagenrfullelem', thisindxparagenrfullelem)
else:
thisindxparagenrfullelem = None
gdatmodi.this.boolpropfilt = True
# index of the population in which a transdimensional proposal will be attempted
if gmod.numbparaelem > 0:
if thisindxpopl is None:
gdatmodi.indxpopltran = np.random.choice(gmod.indxpopl)
else:
gdatmodi.indxpopltran = thisindxpopl
numbelemtemp = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# forced death or birth does not check for the prior on the dimensionality on purpose!
if gmod.numbparaelem > 0 and (deth or brth or np.random.rand() < gdat.probtran) and \
not (numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if brth or deth or np.random.rand() < gdat.probbrde or \
numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == 1 or numbelemtemp == 0:
## births and deaths
if numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] or deth:
gdatmodi.this.indxproptype = 2
elif numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or brth:
gdatmodi.this.indxproptype = 1
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 1
else:
gdatmodi.this.indxproptype = 2
else:
## splits and merges
if numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or numbelemtemp < 2:
gdatmodi.this.indxproptype = 3
elif numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]:
gdatmodi.this.indxproptype = 4
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 3
else:
gdatmodi.this.indxproptype = 4
else:
if gdat.booldiagmode and (gdatmodi.stdp > 1e2).any():
raise Exception('')
thisindxparagenrfullelemconc = []
for l in gmod.indxpopl:
thisindxparagenrfullelemconc.append(thisindxparagenrfullelem[l]['full'])
# get the indices of the current parameter vector
if gmod.numbparaelem > 0:
thisindxsampfull = np.concatenate([gmod.indxparagenrbasestdv] + thisindxparagenrfullelemconc)
else:
thisindxsampfull = gmod.indxparagenrbasestdv
thisstdp = gdatmodi.stdp[gdat.indxstdppara[thisindxsampfull]]
if not np.isfinite(thisstdp).all():
raise Exception('')
gdatmodi.this.indxproptype = 0
if gdat.booldiagmode and gdat.probspmr == 0 and gdatmodi.this.indxproptype > 2:
raise Exception('')
if gdat.typeverb > 1:
print('gdatmodi.this.indxproptype')
print(gdatmodi.this.indxproptype)
if gdatmodi.this.indxproptype == 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = gmodthis.indxelemfull
if gdatmodi.this.indxproptype > 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
gmodnext.paragenrscalfull = np.copy(gmodthis.paragenrscalfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = deepcopy(gmodthis.indxelemfull)
if gdatmodi.this.indxproptype == 0:
## proposal scale
if False:
# amplitude-dependent proposal scale
for l in gmod.indxpopl:
thiscompampl = gmodthis.paragenrscalfull[thisindxparagenrfullelem[indxelemfull][gmod.nameparagenrelemampl[l]][l]]
compampl = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
minmcompampl = getattr(gmod.minmpara, gmod.nameparagenrelemampl[l])
thiscompunit = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
compunit = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
if nameparagenrelem == gmod.nameparagenrelemampl[l]:
# temp -- this only works if compampl is powr distributed
gdatmodi.this.stdp = stdpcomp / (thiscompampl / minmcompampl)**2.
gdatmodi.this.stdv = stdpcomp / (compampl / minmcompampl)**2.
gdatmodi.this.ltrp += np.sum(0.5 * (nextcompunit - thiscompunit)**2 * (1. / gdatmodi.this.stdv**2 - 1. / gdatmodi.this.stdv**2))
else:
gdatmodi.this.stdp = stdpcomp / (np.minimum(thiscompampl, compampl) / minmcompampl)**0.5
## propose a step
diffparagenrunitfull = np.random.normal(size=thisindxsampfull.size) * thisstdp
gmodnext.paragenrunitfull[thisindxsampfull] = gmodthis.paragenrunitfull[thisindxsampfull] + diffparagenrunitfull
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
indxsamplowr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] < 0.)[0]
if indxsamplowr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr] = abs(gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr]) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
indxsampuppr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] > 1.)[0]
if indxsampuppr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] = (gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] - 1.) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
gmodnext.paragenrscalfull = icdf_paragenrscalfull(gdat, strgmodl, gmodnext.paragenrunitfull, thisindxparagenrfullelem)
if gdat.booldiagmode:
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
if np.amin(gmodnext.paragenrunitfull[gmod.numbpopl:]) < 0.:
raise Exception('')
if np.amax(gmodnext.paragenrunitfull[gmod.numbpopl:]) > 1.:
raise Exception('')
if not np.isfinite(gmodnext.paragenrscalfull).all():
raise Exception('')
if gdatmodi.this.indxproptype > 0:
gdatmodi.indxsamptran = []
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.auxipara = np.random.rand(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
elif gdatmodi.this.indxproptype != 2:
gdatmodi.this.auxipara = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
# find an empty slot in the element list
for u in range(gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if not u in gdatmodi.this.indxelemfull[gdatmodi.indxpopltran]:
break
gdatmodi.indxelemmodi = [u]
gdatmodi.indxelemfullmodi = [gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)]
# sample indices to add the new element
gdatmodi.indxparagenrfullelemaddd = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemaddd)
gmodnext.indxelemfull[gdatmodi.indxpopltran].append(gdatmodi.indxelemmodi[0])
if gdatmodi.this.indxproptype == 1:
# sample auxiliary variables
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.this.auxipara
# death
if gdatmodi.this.indxproptype == 2:
# occupied element index to be killed
if thisindxelem is None:
dethindxindxelem = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
else:
dethindxindxelem = thisindxelem
# element index to be killed
gdatmodi.indxelemmodi = []
gdatmodi.indxelemfullmodi = []
if gdat.typeverb > 1:
print('dethindxindxelem')
print(dethindxindxelem)
gdatmodi.indxelemmodi.append(gmodthis.indxelemfull[gdatmodi.indxpopltran][dethindxindxelem])
gdatmodi.indxelemfullmodi.append(dethindxindxelem)
# parameter indices to be killed
indxparagenrfullelemdeth = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(indxparagenrfullelemdeth)
gdatmodi.this.auxipara = gmodthis.paragenrscalfull[indxparagenrfullelemdeth]
if gdatmodi.this.indxproptype > 2:
gdatmodi.comppare = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compfrst = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compseco = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
# split
if gdatmodi.this.indxproptype == 3:
# find the probability of splitting elements
gdatmodi.indxelemfullsplt = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
gdatmodi.indxelemsplt = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullsplt]
gdatmodi.indxelemfullmodi.insert(0, gdatmodi.indxelemfullsplt)
gdatmodi.indxelemmodi.insert(0, gdatmodi.indxelemsplt)
# sample indices for the first element
gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, l, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.insert(0, gdatmodi.indxparagenrfullelemfrst)
# sample indices for the second element
gdatmodi.indxsampseco = gdatmodi.indxparagenrfullelemaddd
# take the parent element parameters
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gdatmodi.comppare[k] = np.copy(gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]])
# draw the auxiliary parameters
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.auxipara[g] = np.random.randn() * gdat.radispmr
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = np.random.rand()
else:
gdatmodi.this.auxipara[g] = icdf_trap(gdat, strgmodl, np.random.rand(), gmodthis.paragenrscalfull, gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], l)
# determine the new parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[1]) * gdatmodi.this.auxipara[0]
else:
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[0]
gdatmodi.compfrst[1] = gdatmodi.comppare[1] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[1]
gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[1] * gdatmodi.this.auxipara[0]
else:
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[0]
gdatmodi.compseco[1] = gdatmodi.comppare[1] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[1]
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
for g in range(gmod.numbparagenrelemsing[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.compfrst[g] = gdatmodi.comppare[g]
gdatmodi.compseco[g] = gdatmodi.this.auxipara[g]
# place the new parameters into the sample vector
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compfrst, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.compfrst
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compseco, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = gdatmodi.compseco
# check for prior boundaries
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
if np.fabs(gdatmodi.compfrst[0]) > gdat.maxmelin or np.fabs(gdatmodi.compseco[0]) > gdat.maxmelin:
gdatmodi.this.boolpropfilt = False
else:
if np.fabs(gdatmodi.compfrst[0]) > maxmlgal or np.fabs(gdatmodi.compseco[0]) > maxmlgal or \
np.fabs(gdatmodi.compfrst[1]) > maxmbgal or np.fabs(gdatmodi.compseco[1]) > maxmbgal:
gdatmodi.this.boolpropfilt = False
if gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]) or \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
if not gdatmodi.this.boolpropfilt:
print('Rejecting the proposal due to a split that falls out of the prior...')
if gdatmodi.this.indxproptype == 4:
# determine the index of the primary element to be merged (in the full element list)
gdatmodi.indxelemfullmergfrst = np.random.choice(np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran])))
## first element index to be merged
gdatmodi.mergindxelemfrst = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergfrst]
# find the probability of merging this element with the others
probmerg = retr_probmerg(gdat, gdatmodi, gmodthis.paragenrscalfull, thisindxparagenrfullelem, gdatmodi.indxpopltran, 'seco', typeelem=gmod.typeelem)
indxelemfulltemp = np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran]))
if gdat.booldiagmode:
if indxelemfulltemp.size < 2:
raise Exception('')
gdatmodi.indxelemfullmergseco = np.random.choice(np.setdiff1d(indxelemfulltemp, np.array([gdatmodi.indxelemfullmergfrst])), p=probmerg)
gdatmodi.indxelemfullmodi = np.sort(np.array([gdatmodi.indxelemfullmergfrst, gdatmodi.indxelemfullmergseco]))
# parameters of the first element to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## first
gdatmodi.compfrst[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]]
# determine indices of the modified elements in the sample vector
## first element
# temp -- this would not work for multiple populations !
gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, l, gdatmodi.mergindxelemfrst)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemfrst)
## second element index to be merged
gdatmodi.mergindxelemseco = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergseco]
## second element
gdatmodi.indxparagenrfullelemseco = retr_indxparaelem(gmod, l, gdatmodi.mergindxelemseco)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemseco)
# parameters of the elements to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## second
gdatmodi.compseco[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[1]]]
# indices of the element to be merged
gdatmodi.indxelemmodi = [gdatmodi.mergindxelemfrst, gdatmodi.mergindxelemseco]
# auxiliary parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
else:
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
gdatmodi.this.auxipara[1] = gdatmodi.compseco[1] - gdatmodi.compfrst[1]
gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] / \
(gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = gdatmodi.compseco[g]
# merged element
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] > getattr(gdat, 'maxm' + gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
print('Proposal rejected due to falling outside the prior.')
return
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[1]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
else:
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
gdatmodi.comppare[1] = gdatmodi.compfrst[1] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[1] - gdatmodi.compfrst[1])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
(gdatmodi.compseco[g] - gdatmodi.compfrst[g])
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + gdatmodi.compseco[g]
else:
gdatmodi.comppare[g] = gdatmodi.compfrst[g]
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.comppare, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.comppare
# calculate the proposed list of pairs
if gdat.typeverb > 1:
print('mergindxfrst: ', gdatmodi.mergindxelemfrst)
print('gdatmodi.indxelemfullmergfrst: ', gdatmodi.indxelemfullmergfrst)
print('mergindxseco: ', gdatmodi.mergindxelemseco)
print('gdatmodi.indxelemfullmergseco: ', gdatmodi.indxelemfullmergseco)
print('indxparagenrfullelemfrst: ', gdatmodi.indxparagenrfullelemfrst)
print('indxparagenrfullelemseco: ', gdatmodi.indxparagenrfullelemseco)
if gdat.typeverb > 1 and (gdatmodi.this.indxproptype == 3 or gdatmodi.this.boolpropfilt and gdatmodi.this.indxproptype == 4):
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
print('elinfrst: ', gdatmodi.compfrst[0])
print('amplfrst: ', gdatmodi.compfrst[1])
print('elinseco: ', gdatmodi.compseco[0])
print('amplseco: ', gdatmodi.compseco[1])
print('elinpare: ', gdatmodi.comppare[0])
print('fluxpare: ', gdatmodi.comppare[1])
print('auxipara[0][0]: ', gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdatmodi.this.auxipara[1])
else:
print('lgalfrst: ', gdat.anglfact * gdatmodi.compfrst[0])
print('bgalfrst: ', gdat.anglfact * gdatmodi.compfrst[1])
print('amplfrst: ', gdatmodi.compfrst[2])
print('lgalseco: ', gdat.anglfact * gdatmodi.compseco[0])
print('bgalseco: ', gdat.anglfact * gdatmodi.compseco[1])
print('amplseco: ', gdatmodi.compseco[2])
print('lgalpare: ', gdat.anglfact * gdatmodi.comppare[0])
print('bgalpare: ', gdat.anglfact * gdatmodi.comppare[1])
print('fluxpare: ', gdatmodi.comppare[2])
print('auxipara[0][0]: ', gdat.anglfact * gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdat.anglfact * gdatmodi.this.auxipara[1])
print('auxipara[0][2]: ', gdatmodi.this.auxipara[2])
if gmod.numbparaelem > 0 and gdatmodi.this.indxproptype > 0 and gdatmodi.this.boolpropfilt:
# change the number of elements
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] + 1
if gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] - 1
gmodnext.paragenrunitfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# remove the element from the occupied element list
if (gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4):
for a, indxelem in enumerate(gdatmodi.indxelemmodi):
if a == 0 and gdatmodi.this.indxproptype == 2 or a == 1 and gdatmodi.this.indxproptype == 4:
gmodnext.indxelemfull[gdatmodi.indxpopltran].remove(indxelem)
if gdatmodi.this.indxproptype == 0:
gdatmodi.indxsampmodi = thisindxsampfull
else:
if gdatmodi.this.indxproptype == 1:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gdatmodi.this.indxproptype == 2:
gdatmodi.indxsampmodi = [gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
if gdatmodi.this.indxproptype == 3:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), \
gdatmodi.indxsamptran[0], gdatmodi.indxsamptran[1]))
if gdatmodi.this.indxproptype == 4:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gmod.numbparaelem > 0:
if gdatmodi.this.indxproptype == 0:
indxparagenrfullelem = thisindxparagenrfullelem
else:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodnext.indxelemfull, strgmodl)
if gdat.typeverb > 1:
print('gdatmodi.indxsampmodi')
print(gdatmodi.indxsampmodi)
if gmod.numbparaelem > 0:
print('gmodthis.indxelemfull')
print(gmodthis.indxelemfull)
print('gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)')
print(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int))
if gdatmodi.this.indxproptype > 0:
print('gdatmodi.indxelemmodi')
print(gdatmodi.indxelemmodi)
print('gdatmodi.indxelemfullmodi')
print(gdatmodi.indxelemfullmodi)
print('gdatmodi.this.boolpropfilt')
print(gdatmodi.this.boolpropfilt)
print('indxparagenrfullelem')
print(indxparagenrfullelem)
if gdatmodi.this.indxproptype == 1:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0][g]] = icdf_trap(gdat, strgmodl, gdatmodi.this.auxipara[g], gmodthis.paragenrscalfull, \
gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gdatmodi.indxpopltran)
if gdat.booldiagmode:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]]):
print('l')
print(l)
print('gmod.indxpara.numbelem')
print(gmod.indxpara.numbelem)
print('gmodthis.paragenrunitfull')
print(gmodthis.paragenrunitfull)
raise Exception('')
if gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if strgmodl == 'fitt':
diffparagenrscalfull = abs(gmodnext.paragenrscalfull - gmodthis.paragenrscalfull)
#size = np.where(((gmodthis.paragenrscalfull == 0.) & (diffparagenrscalfull > 0.)) | ((gmodthis.paragenrscalfull != 0.) & (diffparagenrscalfull / gmodthis.paragenrscalfull > 0)))[0].size
size = np.where(diffparagenrscalfull != 0.)[0].size
if gdatmodi.this.indxproptype == 1:
if size - 1 != gmod.numbparagenrelemsing[gdatmodi.indxpopltran]:
raise Exception('')
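# calc_probprop() collects the factors that multiply the Metropolis-Hastings acceptance
# ratio: the auxiliary-parameter prior term (lpau), the forward/reverse proposal ratio
# (ltrp), and the Jacobian of the split/merge transformation (ljcb)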
def calc_probprop(gdat, gdatmodi):
gmod = gdat.fitt
# calculate the factor to multiply the acceptance rate, i.e.,
## probability of the auxiliary parameters,
if gdatmodi.this.indxproptype == 0:
gdatmodi.this.lpau = 0.
elif gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 2:
gdatmodi.this.lpau = gdatmodi.next.lpritotl - gdatmodi.this.lpritotl
lpautemp = 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[gdatmodi.indxpopltran]
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.lpau += lpautemp
if gdatmodi.this.indxproptype == 2:
gdatmodi.this.lpau -= lpautemp
elif gdatmodi.this.indxproptype == 3 or gdatmodi.this.indxproptype == 4:
gdatmodi.this.lpau = 0.
dictelemtemp = [dict()]
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.lpau += -0.5 * np.log(2. * np.pi * gdat.radispmr**2) - 0.5 * (gdatmodi.this.auxipara[g] / gdat.radispmr)**2
elif g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
dictelemtemp[0][nameparagenrelem] = gdatmodi.this.auxipara[g]
gdatmodi.this.lpau += retr_lprielem(gdat, 'fitt', gdatmodi.indxpopltran, g, \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gdatmodi.this.paragenrscalfull, dictelemtemp, [1])
if gdatmodi.this.indxproptype == 4:
gdatmodi.this.lpau *= -1.
if gdatmodi.this.indxproptype > 2 and gdatmodi.this.boolpropfilt:
## the ratio of the probability of the reverse and forward proposals, and
if gdatmodi.this.indxproptype == 3:
gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.next.paragenrscalfull, gdatmodi.next.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
typeelem=gmod.typeelem)
gdatmodi.this.ltrp = np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran] + 1) + np.log(gdatmodi.this.probmergtotl)
else:
gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.this.paragenrscalfull, gdatmodi.this.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
typeelem=gmod.typeelem)
gdatmodi.this.ltrp = -np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran]) - np.log(gdatmodi.this.probmergtotl)
## Jacobian
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.ljcb = np.log(gdatmodi.comppare[1])
else:
gdatmodi.this.ljcb = np.log(gdatmodi.comppare[2])
if gdatmodi.this.indxproptype == 4:
gdatmodi.this.ljcb *= -1.
else:
gdatmodi.this.ljcb = 0.
gdatmodi.this.ltrp = 0.
for l in gmod.indxpopl:
if gdatmodi.this.indxproptype > 0:
setattr(gdatmodi, 'auxiparapop%d' % l, gdatmodi.this.auxipara)
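# retr_indxparagenrfullelem() maps the occupied element list of each population to the
# indices of the corresponding generative element parameters in the full parameter vector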
def retr_indxparagenrfullelem(gdat, indxelemfull, strgmodl):
gmod = getattr(gdat, strgmodl)
## element parameters
if gmod.numbparaelem > 0:
indxparagenrfullelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
indxparagenrfulltemp = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + np.array(indxelemfull[l], dtype=int) * gmod.numbparagenrelemsing[l]
cntr = tdpy.cntr()
indxparagenrfullelem[l] = dict()
for nameparagenrelem in gmod.namepara.genrelem[l]:
indxparagenrfullelem[l][nameparagenrelem] = indxparagenrfulltemp + cntr.incr()
indxparagenrfullelem[l]['full'] = np.repeat(indxparagenrfulltemp, gmod.numbparagenrelemsing[l]) + np.tile(gmod.indxparagenrelemsing[l], len(indxelemfull[l]))
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(indxparagenrfullelem[l]['full']) > 0:
if np.amax(indxparagenrfullelem[l]['full']) > gmod.numbparagenrelem[l] + gmod.numbparagenrbase:
print('strgmodl')
print(strgmodl)
print('gmod.numbparagenrbase')
print(gmod.numbparagenrbase)
print('gmod.numbparagenrelem[l]')
print(gmod.numbparagenrelem[l])
print('indxparagenrfullelem[l][full]')
summgene(indxparagenrfullelem[l]['full'])
print('gdat.fitt.minmpara.numbelempop0')
print(gdat.fitt.minmpara.numbelempop0)
print('gdat.fitt.maxmpara.numbelempop0')
print(gdat.fitt.maxmpara.numbelempop0)
raise Exception('Element parameter indices are bad.')
else:
indxparagenrfullelem = None
return indxparagenrfullelem
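# merge weights: Gaussian kernels in line energy (one-dimensional) or sky position
# (two-dimensional) with width gdat.radispmr, so that nearby elements are preferred for merging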
def retr_weigmergodim(gdat, elin, elinothr):
weigmerg = np.exp(-0.5 * ((elin - elinothr) / gdat.radispmr)**2)
return weigmerg
def retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr):
weigmerg = np.exp(-0.5 * (((lgal - lgalothr) / gdat.radispmr)**2 + ((bgal - bgalothr) / gdat.radispmr)**2))
return weigmerg
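# retr_probmerg() normalizes the pair weights above into the probability of merging the
# chosen element with each remaining element ('seco') or with a specific partner ('pair')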
def retr_probmerg(gdat, gdatmodi, paragenrscalfull, indxparagenrfullelem, indxpopltran, strgtype, typeelem=None):
# calculate the weights
if strgtype == 'seco':
numb = 1
if strgtype == 'pair':
numb = 2
listweigmerg = []
for a in range(numb):
if typeelem[indxpopltran].startswith('lghtline'):
elintotl = paragenrscalfull[indxparagenrfullelem['elin'][indxpopltran]]
elin = elintotl[gdatmodi.indxelemfullmodi[0]]
elinothr = np.concatenate((elintotl[:gdatmodi.indxelemfullmodi[0]], elintotl[gdatmodi.indxelemfullmodi[0]+1:]))
weigmerg = retr_weigmergodim(gdat, elin, elinothr)
else:
lgaltotl = paragenrscalfull[indxparagenrfullelem['lgal'][indxpopltran]]
bgaltotl = paragenrscalfull[indxparagenrfullelem['bgal'][indxpopltran]]
lgal = lgaltotl[gdatmodi.indxelemfullmodi[0]]
bgal = bgaltotl[gdatmodi.indxelemfullmodi[0]]
lgalothr = np.concatenate((lgaltotl[:gdatmodi.indxelemfullmodi[0]], lgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))
bgalothr = np.concatenate((bgaltotl[:gdatmodi.indxelemfullmodi[0]], bgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))
weigmerg = retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr)
listweigmerg.append(weigmerg)
# determine the probability of merging the second element given the first element
if strgtype == 'seco':
probmerg = listweigmerg[0] / np.sum(listweigmerg[0])
# determine the probability of merging the pair
if strgtype == 'pair':
if typeelem[indxpopltran].startswith('lghtline'):
weigpair = retr_weigmergtdim(gdat, elin, elintotl[gdatmodi.indxelemfullmodi[1]])
else:
weigpair = retr_weigmergtdim(gdat, lgal, lgaltotl[gdatmodi.indxelemfullmodi[1]], bgal, bgaltotl[gdatmodi.indxelemfullmodi[1]])
probmerg = weigpair / np.sum(listweigmerg[0]) + weigpair / np.sum(listweigmerg[1])
if gdat.booldiagmode:
if not np.isfinite(probmerg).all():
raise Exception('Merge probability is not finite.')
return probmerg
def retr_indxparaelem(gmod, l, u):
indxsamppnts = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + u * gmod.numbparagenrelemsing[l] + gmod.indxparagenrelemsing[l]
return indxsamppnts
def gang_detr():
# symbolic scratch routine; assumes sympy is importable and defines the factor symbols explicitly
gang, aang, lgal, bgal = sympy.symbols('gang aang lgal bgal')
a1, a2, a3, b1, b2, b3 = sympy.symbols('a1 a2 a3 b1 b2 b3')
AB = sympy.matrices.Matrix([[a1*b1,a1*b2,a1*b3],[a2*b1,a2*b2,a2*b3],[a3*b1,a3*b2,a3*b3]])
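# retr_psfn() evaluates the point spread function on a grid of angles for the requested
# energy bins and event types; Fermi-LAT PSFs are evaluated in scaled-angle units and
# renormalized over the angular bins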
def retr_psfn(gdat, psfp, indxenertemp, thisangl, typemodlpsfn, strgmodl):
gmod = getattr(gdat, strgmodl)
indxpsfpinit = gmod.numbpsfptotl * (indxenertemp[:, None] + gdat.numbener * gdat.indxevtt[None, :])
if gdat.typeexpr == 'ferm':
scalangl = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(thisangl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
scalanglnorm = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(gdat.binspara.angl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
else:
scalangl = thisangl[None, :, None]
if typemodlpsfn == 'singgaus':
sigc = psfp[indxpsfpinit]
sigc = sigc[:, None, :]
psfn = retr_singgaus(scalangl, sigc)
elif typemodlpsfn == 'singking':
sigc = psfp[indxpsfpinit]
gamc = psfp[indxpsfpinit+1]
sigc = sigc[:, None, :]
gamc = gamc[:, None, :]
psfn = retr_singking(scalangl, sigc, gamc)
elif typemodlpsfn == 'doubking':
sigc = psfp[indxpsfpinit]
gamc = psfp[indxpsfpinit+1]
sigt = psfp[indxpsfpinit+2]
gamt = psfp[indxpsfpinit+3]
frac = psfp[indxpsfpinit+4]
sigc = sigc[:, None, :]
gamc = gamc[:, None, :]
sigt = sigt[:, None, :]
gamt = gamt[:, None, :]
frac = frac[:, None, :]
psfn = retr_doubking(scalangl, frac, sigc, gamc, sigt, gamt)
if gdat.typeexpr == 'ferm':
psfnnorm = retr_doubking(scalanglnorm, frac, sigc, gamc, sigt, gamt)
# normalize the PSF
if gdat.typeexpr == 'ferm':
fact = 2. * np.pi * np.trapz(psfnnorm * np.sin(gdat.binspara.angl[None, :, None]), gdat.binspara.angl, axis=1)[:, None, :]
psfn /= fact
return psfn
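# retr_unit(): unit Cartesian vector on the sphere for the given longitude and latitude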
def retr_unit(lgal, bgal):
xdat = np.cos(bgal) * np.cos(lgal)
ydat = -np.cos(bgal) * np.sin(lgal)
zaxi = np.sin(bgal)
return xdat, ydat, zaxi
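# retr_psec(): two-dimensional power spectrum of the convergence map via an FFT on the
# Cartesian grid, keeping one quadrant of Fourier modes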
def retr_psec(gdat, conv):
# temp
conv = conv.reshape((gdat.numbsidecart, gdat.numbsidecart))
psec = (abs(scipy.fftpack.fft2(conv))**2)[:gdat.numbsidecarthalf, :gdat.numbsidecarthalf] * 1e-3
psec = psec.flatten()
return psec
def retr_psecodim(gdat, psec):
psec = psec.reshape((gdat.numbsidecarthalf, gdat.numbsidecarthalf))
psecodim = np.zeros(gdat.numbsidecarthalf)
for k in gdat.indxmpolodim:
indxmpol = np.where((gdat.meanpara.mpol > gdat.binspara.mpolodim[k]) & (gdat.meanpara.mpol < gdat.binspara.mpolodim[k+1]))
psecodim[k] = np.mean(psec[indxmpol])
psecodim *= gdat.meanpara.mpolodim**2
return psecodim
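# retr_eerrnorm(): Gaussian CDF at the lower bound and the probability mass inside
# [minmvarb, maxmvarb], used to normalize a truncated Gaussian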
def retr_eerrnorm(minmvarb, maxmvarb, meanvarb, stdvvarb):
cdfnminm = 0.5 * (sp.special.erf((minmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)
cdfnmaxm = 0.5 * (sp.special.erf((maxmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)
cdfndiff = cdfnmaxm - cdfnminm
return cdfnminm, cdfndiff
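# retr_condcatl() builds the condensed catalog: element samples from all posterior sweeps
# are stacked, samples closer than gdat.distthrs in every feature are associated with one
# another, and the median and 16th/84th percentiles of each feature are reported per
# condensed element along with its prevalence across sweeps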
def retr_condcatl(gdat):
# setup
# gmod is not passed in; assume the fitting model, as in calc_probprop() above
gmod = gdat.fitt
## number of stacked samples
numbstks = 0
indxtupl = []
indxstks = []
indxstksparagenrscalfull = []
for n in gdat.indxsamptotl:
indxstks.append([])
indxstkssamptemp = []
for l in gmod.indxpopl:
indxstks[n].append([])
for k in range(len(gdat.listpostindxelemfull[n][l])):
indxstks[n][l].append(numbstks)
indxstkssamptemp.append(numbstks)
indxtupl.append([n, l, k])
numbstks += 1
indxstksparagenrscalfull.append(np.array(indxstkssamptemp))
if gdat.typeverb > 1:
print('indxstks')
print(indxstks)
print('indxtupl')
print(indxtupl)
print('indxstksparagenrscalfull')
print(indxstksparagenrscalfull)
print('numbstks')
print(numbstks)
cntr = 0
arrystks = np.zeros((numbstks, gmod.numbparagenrelemtotl))
for n in gdat.indxsamptotl:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdat.listpostindxelemfull[n], 'fitt')
for l in gmod.indxpopl:
for k in np.arange(len(gdat.listpostindxelemfull[n][l])):
for m, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
arrystks[indxstks[n][l][k], m] = gdat.listpostparagenrscalfull[n, indxparagenrfullelem[l][nameparagenrelem][k]]
if gdat.typeverb > 0:
print('Constructing the distance matrix for %d stacked samples...' % arrystks.shape[0])
timeinit = gdat.functime()
gdat.distthrs = np.empty(gmod.numbparagenrelemtotl)
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
# temp
l = 0
gdat.distthrs[k] = gdat.stdp[getattr(gdat, 'indxstdppop%d' % l + nameparagenrelem)]
# construct lists of samples for each proposal type
listdisttemp = [[] for k in range(gmod.numbparagenrelemtotl)]
indxstksrows = [[] for k in range(gmod.numbparagenrelemtotl)]
indxstkscols = [[] for k in range(gmod.numbparagenrelemtotl)]
thisperc = 0
cntr = 0
for k in gmod.indxparagenrelemtotl:
for n in range(numbstks):
dist = np.fabs(arrystks[n, k] - arrystks[:, k])
indxstks = np.where(dist < gdat.distthrs[k])[0]
if indxstks.size > 0:
for j in indxstks:
cntr += 1
listdisttemp[k].append(dist[j])
indxstksrows[k].append(n)
indxstkscols[k].append(j)
nextperc = np.floor(100. * float(k * numbstks + n) / numbstks / gmod.numbparagenrelemtotl)
if nextperc > thisperc:
thisperc = nextperc
if cntr > 1e6:
break
listdisttemp[k] = np.array(listdisttemp[k])
indxstksrows[k] = np.array(indxstksrows[k])
indxstkscols[k] = np.array(indxstkscols[k])
if cntr > 1e6:
break
listdist = [[] for k in range(gmod.numbparagenrelemtotl)]
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
listdist[k] = scipy.sparse.csr_matrix((listdisttemp[k], (indxstksrows[k], indxstkscols[k])), shape=(numbstks, numbstks))
listindxstkspair = []
indxstksleft = []
if gdat.typeverb > 0:
timefinl = gdat.functime()
indxstksleft = range(numbstks)
# list of sample lists of the labeled element
indxstksassc = []
cntr = 0
gdat.prvlthrs = 0.05
while len(indxstksleft) > 0:
# count number of associations
numbdist = np.zeros(numbstks, dtype=int) - 1
for p in range(len(indxstksleft)):
indxindx = np.where((listdist[0][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmlgal < gdat.anglassc) & \
(listdist[1][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmbgal < gdat.anglassc))[0]
numbdist[indxstksleft[p]] = indxindx.size
prvlmaxmesti = np.amax(numbdist) / float(gdat.numbsamptotl)
if prvlmaxmesti < gdat.prvlthrs:
break
# determine the element with the highest number of neighbors
indxstkscntr = np.argmax(numbdist)
indxsamptotlcntr = indxtupl[indxstkscntr][0]
indxpoplcntr = indxtupl[indxstkscntr][1]
indxelemcntr = indxtupl[indxstkscntr][2]
# add the central element sample
indxstksassc.append([])
indxstksassc[cntr].append(indxstkscntr)
indxstksleft.remove(indxstkscntr)
if gdat.typeverb > 1:
print('Match step %d' % cntr)
print('numbdist')
print(numbdist)
print('indxstkscntr')
print(indxstkscntr)
print('indxstksleft')
print(indxstksleft)
# add the associated element samples
if len(indxstksleft) > 0:
for n in gdat.indxsamptotl:
indxstkstemp = np.intersect1d(np.array(indxstksleft), indxstksparagenrscalfull[n])
if n == indxsamptotlcntr:
continue
if indxstkstemp.size > 0:
totl = np.zeros_like(indxstkstemp)
for k in gmod.indxparagenrelemtotl:
temp = listdist[k][indxstkscntr, indxstkstemp].toarray()[0]
totl = totl + temp**2
indxleft = np.argsort(totl)[0]
indxstksthis = indxstkstemp[indxleft]
thisbool = True
for k in gmod.indxparagenrelemtotl:
if listdist[k][indxstkscntr, indxstksthis] > gdat.distthrs[k]:
thisbool = False
if thisbool:
indxstksassc[cntr].append(indxstksthis)
indxstksleft.remove(indxstksthis)
# temp
#if gdat.makeplot:
# gdatmodi = tdpy.gdatstrt()
# gdatmodi.this.indxelemfull = deepcopy(listindxelemfull[n])
# for r in range(len(indxstksassc)):
# calc_poststkscond(gdat, indxstksassc)
# gdatmodi.this.indxelemfull = [[] for l in gmod.indxpopl]
# for indxstkstemp in indxstksleft:
# indxsamptotlcntr = indxtupl[indxstkstemp][0]
# indxpoplcntr = indxtupl[indxstkstemp][1]
# indxelemcntr = indxtupl[indxstkstemp][2]
# gdatmodi.this.paragenrscalfull = gdat.listparagenrscalfull[indxsamptotlcntr, :]
# gdatmodi.this.indxelemfull[].append()
# plot_genemaps(gdat, gdatmodi, 'this', 'cntpdata', strgpdfn, indxenerplot=0, indxevttplot=0, cond=True)
cntr += 1
gdat.dictglob['poststkscond'] = []
gdat.dictglob['liststkscond'] = []
# for each condensed element
for r in range(len(indxstksassc)):
gdat.dictglob['liststkscond'].append([])
gdat.dictglob['liststkscond'][r] = {}
gdat.dictglob['poststkscond'].append([])
gdat.dictglob['poststkscond'][r] = {}
for strgfeat in gmod.namepara.genrelem:
gdat.dictglob['liststkscond'][r][strgfeat] = []
# for each associated sample associated with the central stacked sample
for k in range(len(indxstksassc[r])):
indxsamptotlcntr = indxtupl[indxstksassc[r][k]][0]
indxpoplcntr = indxtupl[indxstksassc[r][k]][1]
indxelemcntr = indxtupl[indxstksassc[r][k]][2]
for strgfeat in gmod.namepara.genrelem:
temp = getattr(gdat, 'list' + strgfeat)
if temp[indxsamptotlcntr][indxpoplcntr].size > 0:
temp = temp[indxsamptotlcntr][indxpoplcntr][..., indxelemcntr]
gdat.dictglob['liststkscond'][r][strgfeat].append(temp)
for r in range(len(gdat.dictglob['liststkscond'])):
for strgfeat in gmod.namepara.genrelem:
arry = np.stack(gdat.dictglob['liststkscond'][r][strgfeat], axis=0)
gdat.dictglob['poststkscond'][r][strgfeat] = np.zeros(([3] + list(arry.shape[1:])))
gdat.dictglob['poststkscond'][r][strgfeat][0, ...] = np.median(arry, axis=0)
gdat.dictglob['poststkscond'][r][strgfeat][1, ...] = np.percentile(arry, 16., axis=0)
gdat.dictglob['poststkscond'][r][strgfeat][2, ...] = np.percentile(arry, 84., axis=0)
gdat.numbstkscond = len(gdat.dictglob['liststkscond'])
gdat.indxstkscond = np.arange(gdat.numbstkscond)
gdat.prvl = np.empty(gdat.numbstkscond)
for r in gdat.indxstkscond:
gdat.prvl[r] = len(gdat.dictglob['liststkscond'][r]['deltllik'])
gdat.prvl /= gdat.numbsamptotl
gdat.minmprvl = 0.
gdat.maxmprvl = 1.
retr_axis(gdat, 'prvl')
gdat.histprvl = np.histogram(gdat.prvl, bins=gdat.binspara.prvl)[0]
if gdat.makeplot:
pathcond = getattr(gdat, 'path' + strgpdfn + 'finlcond')
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
path = pathcond + 'histdist' + nameparagenrelem
listtemp = np.copy(listdist[k].toarray()).flatten()
listtemp = listtemp[np.where(listtemp != 1e20)[0]]
tdpy.mcmc.plot_hist(path, listtemp, r'$\Delta \tilde{' + getattr(gmod.lablrootpara, nameparagenrelem) + '}$')
path = pathcond + 'histprvl'
tdpy.mcmc.plot_hist(path, gdat.prvl, r'$p$')
gdat.prvlthrs = 0.1
gdat.indxprvlhigh = np.where(gdat.prvl > gdat.prvlthrs)[0]
gdat.numbprvlhigh = gdat.indxprvlhigh.size
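# retr_conv(): lensing convergence as half the divergence of the deflection field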
def retr_conv(gdat, defl):
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
# temp
conv = abs(np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0) + np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) / 2.
conv = conv.flatten()
return conv
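# retr_invm(): inverse magnification, the determinant of the lensing Jacobian
# (1 - d(defl_x)/dx) * (1 - d(defl_y)/dy) - d(defl_x)/dy * d(defl_y)/dx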
def retr_invm(gdat, defl):
# temp
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
invm = (1. - np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0)) * (1. - np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) - \
np.gradient(defl[:, :, 0], gdat.sizepixl, axis=1) * np.gradient(defl[:, :, 1], gdat.sizepixl, axis=0)
invm = invm.flatten()
return invm
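# setp_indxswepsave(): mark the sweeps to be recorded -- every factthin-th sweep after
# burn-in -- and map the saved sweeps to sample indices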
def setp_indxswepsave(gdat):
gdat.indxswep = np.arange(gdat.numbswep)
gdat.boolsave = np.zeros(gdat.numbswep, dtype=bool)
gdat.indxswepsave = np.arange(gdat.numbburn, gdat.numbburn + gdat.numbsamp * gdat.factthin, gdat.factthin)
gdat.boolsave[gdat.indxswepsave] = True
gdat.indxsampsave = np.zeros(gdat.numbswep, dtype=int) - 1
gdat.indxsampsave[gdat.indxswepsave] = np.arange(gdat.numbsamp)
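# retr_cntspnts(): expected counts of point elements, i.e., the spectrum times the total
# exposure at the element pixel, integrated over the energy bins when the data are binned
# in energy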
def retr_cntspnts(gdat, listposi, spec):
cnts = np.zeros((gdat.numbener, spec.shape[1]))
if gdat.boolbinsspat:
lgal = listposi[0]
bgal = listposi[1]
indxpixlpnts = retr_indxpixl(gdat, bgal, lgal)
else:
elin = listposi[0]
indxpixlpnts = np.zeros_like(elin, dtype=int)
for k in range(spec.shape[1]):
cnts[:, k] += spec[:, k] * gdat.expototl[:, indxpixlpnts[k]]
if gdat.enerdiff:
cnts *= gdat.deltener[:, None]
cnts = np.sum(cnts, axis=0)
return cnts
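# lensing mass utilities: critical surface mass density, the mass corresponding to a given
# Einstein radius, and the truncation-mass correction factor for truncated subhalo profiles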
def retr_mdencrit(gdat, adissour, adishost, adishostsour):
mdencrit = gdat.factnewtlght / 4. / np.pi * adissour / adishostsour / adishost
return mdencrit
def retr_massfrombein(gdat, adissour, adishost, adishostsour):
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
massfrombein = np.pi * adishost**2 * mdencrit
return massfrombein
def retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut):
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
fracacutasca = acut / asca
factmcutfromdefs = np.pi * adishost**2 * mdencrit * asca * retr_mcutfrommscl(fracacutasca)
return factmcutfromdefs
def retr_mcut(gdat, defs, asca, acut, adishost, mdencrit):
mscl = defs * np.pi * adishost**2 * mdencrit * asca
fracacutasca = acut / asca
mcut = mscl * retr_mcutfrommscl(fracacutasca)
return mcut
def retr_mcutfrommscl(fracacutasca):
mcut = fracacutasca**2 / (fracacutasca**2 + 1.)**2 * ((fracacutasca**2 - 1.) * np.log(fracacutasca) + fracacutasca * np.pi - (fracacutasca**2 + 1.))
return mcut
def retr_negalogt(varb):
negalogt = np.sign(varb) * np.log10(np.fabs(varb))
return negalogt
def retr_gradmaps(gdat, maps):
# temp -- this does not work with vanishing exposure
maps = maps.reshape((gdat.numbsidecart, gdat.numbsidecart))
grad = np.dstack((np.gradient(maps, gdat.sizepixl, axis=0), np.gradient(maps, gdat.sizepixl, axis=1))).reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
grad = grad.reshape((gdat.numbpixlcart, 2))
return grad
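# retr_spatmean(): spatial mean and standard deviation of a map (or count map) over each
# predefined spatial region, optionally corrected for exposure and energy bin width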
def retr_spatmean(gdat, inpt, boolcntp=False):
listspatmean = [[] for b in gdat.indxspatmean]
listspatstdv = [[] for b in gdat.indxspatmean]
for b, namespatmean in enumerate(gdat.listnamespatmean):
if boolcntp:
cntp = inpt[gdat.listindxcubespatmean[b]]
else:
cntp = inpt[gdat.listindxcubespatmean[b]] * gdat.expo[gdat.listindxcubespatmean[b]] * gdat.apix
if gdat.enerdiff:
cntp *= gdat.deltener[:, None, None]
spatmean = np.mean(np.sum(cntp, 2), axis=1) / gdat.apix
spatstdv = np.sqrt(np.sum(cntp, axis=(1, 2))) / gdat.numbdata / gdat.apix
if gdat.boolcorrexpo:
spatmean /= gdat.expototlmean
spatstdv /= gdat.expototlmean
if gdat.enerdiff:
spatmean /= gdat.deltener
spatstdv /= gdat.deltener
listspatmean[b] = spatmean
listspatstdv[b] = spatstdv
return listspatmean, listspatstdv
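# retr_rele(): relevance of a subhalo, computed as the mean (by default absolute) inner
# product of the image gradient and the deflection the subhalo produces, optionally
# weighted by the model counts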
def retr_rele(gdat, maps, lgal, bgal, defs, asca, acut, indxpixlelem, absv=True, cntpmodl=None):
grad = retr_gradmaps(gdat, maps)
defl = retr_defl(gdat, indxpixlelem, lgal, bgal, defs, asca=asca, acut=acut)
prod = grad * defl
if cntpmodl is not None:
prod /= cntpmodl[:, None]
dotstemp = np.sum(prod, 1)
if absv:
dotstemp = np.fabs(dotstemp)
dots = np.mean(dotstemp)
return dots
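# retr_fromgdat(): dispatcher that fetches a named variable from the data (gdat), the
# current sampler state (gdatmodi), or the posterior summaries, depending on strgstat,
# strgmodl, and strgmome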
def retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn, strgmome='pmea', indxvarb=None, indxlist=None):
if strgvarb.startswith('cntpdata'):
varb = getattr(gdat, strgvarb)
elif strgvarb.startswith('histcntpdata'):
varb = getattr(gdat, strgvarb)
else:
if strgmodl == 'true':
gmod = getattr(gdat, strgmodl)
gmodstat = getattr(gmod, strgstat)
varb = getattr(gmodstat, strgvarb)
if strgmodl == 'fitt':
if strgstat == 'this':
if strgmome == 'errr':
varb = getattr(gdatmodi, strgstat + 'errr' + strgvarb)
else:
varb = getattr(gdatmodi, strgstat + strgvarb)
if strgstat == 'pdfn':
varb = getattr(gdat, strgmome + strgpdfn + strgvarb)
if indxlist is not None:
varb = varb[indxlist]
if indxvarb is not None:
if strgmome == 'errr':
varb = varb[[slice(None)] + indxvarb]
else:
varb = varb[indxvarb]
return np.copy(varb)
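# setp_indxpara(): builds the parameter bookkeeping of a model -- element populations,
# background components, and PSF parameters -- in two passes ('init' and 'finl')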
def setp_indxpara(gdat, typesetp, strgmodl='fitt'):
print('setp_indxpara(): Building parameter indices for model %s with type %s...' % (strgmodl, typesetp))
gmod = getattr(gdat, strgmodl)
if typesetp == 'init':
if strgmodl == 'fitt':
gmod.lablmodl = 'Model'
if strgmodl == 'true':
gmod.lablmodl = 'True'
# transdimensional element populations
gmod.numbpopl = len(gmod.typeelem)
gmod.indxpopl = np.arange(gmod.numbpopl)
if gdat.typeexpr != 'user':
# background component
gmod.numbback = 0
gmod.indxback = []
for c in range(len(gmod.typeback)):
if isinstance(gmod.typeback[c], str):
if gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
namebfun = gmod.typeback[c][:8]
ordrexpa = int(gmod.typeback[c][8:])
numbexpa = 4 * ordrexpa**2
indxexpa = np.arange(numbexpa)
del gmod.typeback[c]
for k in indxexpa:
gmod.typeback.insert(c+k, namebfun + '%04d' % k)
gmod.numbback = len(gmod.typeback)
gmod.indxback = np.arange(gmod.numbback)
gmod.numbbacktotl = np.sum(gmod.numbback)
gmod.indxbacktotl = np.arange(gmod.numbbacktotl)
# galaxy components
gmod.indxsersfgrd = np.arange(gmod.numbsersfgrd)
# name of the generative element parameter used for the amplitude
gmod.nameparagenrelemampl = [[] for l in gmod.indxpopl]
gmod.indxparagenrelemampl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.nameparagenrelemampl[l] = 'per0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.nameparagenrelemampl[l] = 'lum0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtline'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 1
elif gmod.typeelem[l].startswith('lghtpnts'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtgausbgrd'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l] == 'lens':
gmod.nameparagenrelemampl[l] = 'defs'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l].startswith('clus'):
gmod.nameparagenrelemampl[l] = 'nobj'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l] == 'lens':
gmod.nameparagenrelemampl[l] = 'defs'
if gmod.typeelem[l] == 'clus':
gmod.nameparagenrelemampl[l] = 'nobj'
if len(gmod.nameparagenrelemampl[l]) == 0:
raise Exception('Amplitude feature undefined.')
for featpara in gdat.listfeatpara:
for strggrop in gdat.liststrggroppara:
setattr(gmod, 'list' + featpara + 'para' + strggrop, [])
if typesetp == 'finl':
# number of elements in the current state of the true model
if strgmodl == 'true':
gmod.numbelem = np.zeros(gmod.numbpopl)
for l in gmod.indxpopl:
gmod.numbelem[l] += getattr(gmod.maxmpara, 'numbelempop%d' % l)
gmod.numbelemtotl = np.sum(gmod.numbelem)
# element setup
## flag to calculate the kernel approximation errors
boolcalcerrr = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelemspateval[l] == 'locl' and gdat.numbpixlfull < 1e5:
# temp
boolcalcerrr[l] = False
else:
boolcalcerrr[l] = False
setp_varb(gdat, 'boolcalcerrr', valu=boolcalcerrr, strgmodl=strgmodl)
# maximum number of elements for each population
gmod.maxmpara.numbelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.maxmpara.numbelem[l] = getattr(gmod.maxmpara, 'numbelempop%d' % l)
# maximum number of elements summed over all populations
gmod.maxmpara.numbelemtotl = np.sum(gmod.maxmpara.numbelem)
## sorting feature
nameparaelemsort = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
# feature to be used to sort elements
if gmod.typeelem[l].startswith('lght'):
nameparaelemsort[l] = 'flux'
if gmod.typeelem[l] == 'lens':
nameparaelemsort[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
nameparaelemsort[l] = 'nobj'
## label extensions
gmod.lablelemextn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{fps}'
if gmod.typeelem[l] == 'lghtgausbgrd':
gmod.lablelemextn[l] = r'\rm{bgs}'
else:
if gmod.typeelem[l].startswith('lghtpntspuls'):
gmod.lablelemextn[l] = r'\rm{pul}'
if gmod.typeelem[l].startswith('lghtpntsagnn'):
gmod.lablelemextn[l] = r'\rm{agn}'
elif gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{pts}'
if gmod.typeelem[l] == 'lens':
gmod.lablelemextn[l] = r'\rm{sub}'
if gmod.typeelem[l].startswith('clus'):
gmod.lablelemextn[l] = r'\rm{cls}'
if gmod.typeelem[l].startswith('lghtline'):
gmod.lablelemextn[l] = r'\rm{lin}'
gmod.indxpoplgrid = [[] for y in gdat.indxgrid]
for y in gdat.indxgrid:
for indx, typeelemtemp in enumerate(gmod.typeelem):
# foreground grid (image plane) -- the one where the data is measured
if y == 0:
if typeelemtemp.startswith('lght') and not typeelemtemp.endswith('bgrd') or typeelemtemp.startswith('clus'):
gmod.indxpoplgrid[y].append(indx)
# foreground mass grid
if y == 1:
if typeelemtemp.startswith('lens'):
gmod.indxpoplgrid[y].append(indx)
# background grid (source plane)
if y == 2:
if typeelemtemp.endswith('bgrd'):
gmod.indxpoplgrid[y].append(indx)
indxgridpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for y in gdat.indxgrid:
if l in gmod.indxpoplgrid[y]:
indxgridpopl[l] = y
calcelemsbrt = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts'):
calcelemsbrt = True
if 'lghtgausbgrd' in gmod.typeelem:
calcelemsbrtbgrd = True
else:
calcelemsbrtbgrd = False
if gmod.boollenssubh:
calcelemdefl = True
else:
calcelemdefl = False
## element Boolean flags
gmod.boolelemlght = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.boolelemlght[l] = True
else:
gmod.boolelemlght[l] = False
gmod.boolelemlghtanyy = True in gmod.boolelemlght
gmod.boolelemlens = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lens'):
gmod.boolelemlens = True
gmod.boolelemsbrtdfnc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0 and (gmod.typeelem[l].startswith('lght') and not gmod.typeelem[l].endswith('bgrd') or gmod.typeelem[l].startswith('clus')):
gmod.boolelemsbrtdfnc[l] = True
else:
gmod.boolelemsbrtdfnc[l] = False
gmod.boolelemsbrtdfncanyy = True in gmod.boolelemsbrtdfnc
gmod.boolelemdeflsubh = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
gmod.boolelemdeflsubh[l] = True
else:
gmod.boolelemdeflsubh[l] = False
gmod.boolelemdeflsubhanyy = True in gmod.boolelemdeflsubh
gmod.boolelemsbrtextsbgrd = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l].endswith('bgrd'):
gmod.boolelemsbrtextsbgrd[l] = True
else:
gmod.boolelemsbrtextsbgrd[l] = False
gmod.boolelemsbrtextsbgrdanyy = True in gmod.boolelemsbrtextsbgrd
if gmod.boolelemsbrtextsbgrdanyy:
gmod.indxpopllens = 1
else:
gmod.indxpopllens = 0
gmod.boolelemsbrtpnts = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l] != 'lghtline' or gmod.typeelem[l] == 'clus':
gmod.boolelemsbrtpnts[l] = True
else:
gmod.boolelemsbrtpnts[l] = False
gmod.boolelemsbrtpntsanyy = True in gmod.boolelemsbrtpnts
# temp -- because there is currently no extended source
gmod.boolelemsbrt = gmod.boolelemsbrtdfnc
gmod.boolelempsfn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts') or gmod.typeelem[l] == 'clus':
gmod.boolelempsfn[l] = True
else:
gmod.boolelempsfn[l] = False
gmod.boolelempsfnanyy = True in gmod.boolelempsfn
spectype = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.boolelemlght[l]:
spectype[l] = 'powr'
else:
spectype[l] = 'none'
setp_varb(gdat, 'spectype', valu=spectype, strgmodl=strgmodl)
minmgwdt = 2. * gdat.sizepixl
maxmgwdt = gdat.maxmgangdata / 4.
setp_varb(gdat, 'gwdt', minm=minmgwdt, maxm=maxmgwdt, strgmodl=strgmodl)
setp_varb(gdat, 'aerr', minm=-100, maxm=100, strgmodl=strgmodl, popl='full')
if gmod.boolelemlghtanyy:
# flux
if gdat.typeexpr == 'ferm':
minmflux = 1e-9
maxmflux = 1e-6
if gdat.typeexpr == 'tess':
minmflux = 1.
maxmflux = 1e3
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
minmflux = 1e4
maxmflux = 1e7
else:
minmflux = 3e-9
maxmflux = 1e-6
if gdat.typeexpr == 'gene':
minmflux = 0.1
maxmflux = 100.
if gdat.typeexpr == 'hubb':
minmflux = 1e-20
maxmflux = 1e-17
if gdat.typeexpr == 'fire':
minmflux = 1e-20
maxmflux = 1e-17
setp_varb(gdat, 'flux', limt=[minmflux, maxmflux], strgmodl=strgmodl)
if gdat.typeexpr == 'ferm':
setp_varb(gdat, 'brekprioflux', limt=[3e-9, 1e-6], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'sloplowrprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'slopupprprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
if gdat.boolbinsener:
### spectral parameters
if gdat.typeexpr == 'ferm':
sind = [1., 3.]
minmsind = 1.
maxmsind = 3.
if gdat.typeexpr == 'chan':
minmsind = 0.4
maxmsind = 2.4
sind = [0.4, 2.4]
if gdat.typeexpr == 'hubb':
minmsind = 0.5
maxmsind = 2.5
sind = [0.4, 2.4]
if gdat.typeexpr != 'fire':
setp_varb(gdat, 'sind', limt=[minmsind, maxmsind], strgmodl=strgmodl)
setp_varb(gdat, 'curv', limt=[-1., 1.], strgmodl=strgmodl)
setp_varb(gdat, 'expc', limt=[0.1, 10.], strgmodl=strgmodl)
setp_varb(gdat, 'sinddistmean', limt=sind, popl='full', strgmodl=strgmodl)
#### standard deviations should not be too small
setp_varb(gdat, 'sinddiststdv', limt=[0.3, 2.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdistmean', limt=[-1., 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdiststdv', limt=[0.1, 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdistmean', limt=[1., 8.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdiststdv', limt=[0.01 * gdat.maxmener, gdat.maxmener], popl='full', strgmodl=strgmodl)
for i in gdat.indxenerinde:
setp_varb(gdat, 'sindcolr0001', limt=[-2., 6.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr0002', limt=[0., 8.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr%04d' % i, limt=[-5., 10.], strgmodl=strgmodl)
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
setp_varb(gdat, 'gang', limt=[1e-1 * gdat.sizepixl, gdat.maxmgangdata], strgmodl=strgmodl)
setp_varb(gdat, 'geff', limt=[0., 0.4], strgmodl=strgmodl)
setp_varb(gdat, 'dglc', limt=[10., 3e3], strgmodl=strgmodl)
setp_varb(gdat, 'phii', limt=[0., 2. * np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'thet', limt=[0., np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'per0distmean', limt=[5e-4, 1e1], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdistmean', limt=[1e7, 1e16], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'per0diststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdiststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'gangslop', limt=[0.5, 4.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'dglcslop', limt=[0.5, 2.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'spatdistcons', limt=[1e-4, 1e-2], popl='full')
setp_varb(gdat, 'bgaldistscal', limt=[0.5 / gdat.anglfact, 5. / gdat.anglfact], popl='full', strgmodl=strgmodl)
if gmod.typeelem[l] == 'lghtpntsagnntrue':
setp_varb(gdat, 'dlos', limt=[1e7, 1e9], strgmodl=strgmodl)
setp_varb(gdat, 'dlosslop', limt=[-0.5, -3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0', limt=[1e43, 1e46], strgmodl=strgmodl)
setp_varb(gdat, 'lum0distbrek', limt=[1e42, 1e46], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0sloplowr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0slopuppr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
# construct background surface brightness templates from the user input
gmod.sbrtbacknorm = [[] for c in gmod.indxback]
gmod.boolunifback = np.ones(gmod.numbback, dtype=bool)
for c in gmod.indxback:
gmod.sbrtbacknorm[c] = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
if gmod.typeback[c] == 'data':
gmod.sbrtbacknorm[c] = np.copy(gdat.sbrtdata)
gmod.sbrtbacknorm[c][np.where(gmod.sbrtbacknorm[c] == 0.)] = 1e-100
elif isinstance(gmod.typeback[c], float):
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c]
elif isinstance(gmod.typeback[c], list) and isinstance(gmod.typeback[c], float):
gmod.sbrtbacknorm[c] = retr_spec(gdat, np.array([gmod.typeback[c]]), sind=np.array([gmod.typeback[c]]))[:, 0, None, None]
elif isinstance(gmod.typeback[c], np.ndarray) and gmod.typeback[c].ndim == 1:
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c][:, None, None]
elif gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
indxexpatemp = int(gmod.typeback[c][8:])
indxterm = indxexpatemp // ordrexpa**2
indxexpaxdat = (indxexpatemp % ordrexpa**2) // ordrexpa + 1
indxexpaydat = (indxexpatemp % ordrexpa**2) % ordrexpa + 1
if namebfun == 'bfunfour':
ampl = 1.
func = gdat.meanpara.bgalcart
if namebfun == 'bfunwfou':
functemp = np.exp(-0.5 * (gdat.meanpara.bgalcart / (1. / gdat.anglfact))**2)
ampl = np.sqrt(functemp)
func = functemp
argslgal = 2. * np.pi * indxexpaxdat * gdat.meanpara.lgalcart / gdat.maxmgangdata
argsbgal = 2. * np.pi * indxexpaydat * func / gdat.maxmgangdata
if indxterm == 0:
termfrst = np.sin(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 1:
termfrst = np.sin(argslgal)
termseco = ampl * np.cos(argsbgal)
if indxterm == 2:
termfrst = np.cos(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 3:
termfrst = np.cos(argslgal)
termseco = ampl * np.cos(argsbgal)
gmod.sbrtbacknorm[c] = (termfrst[None, :] * termseco[:, None]).flatten()[None, :, None] * \
np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
else:
path = gdat.pathinpt + gmod.typeback[c]
gmod.sbrtbacknorm[c] = astropy.io.fits.getdata(path)
if gdat.typepixl == 'cart':
if not gdat.boolforccart:
if gmod.sbrtbacknorm[c].shape[2] != gdat.numbsidecart:
raise Exception('Provided background template must have the chosen image dimensions.')
gmod.sbrtbacknorm[c] = gmod.sbrtbacknorm[c].reshape((gmod.sbrtbacknorm[c].shape[0], -1, gmod.sbrtbacknorm[c].shape[-1]))
if gdat.typepixl == 'cart' and gdat.boolforccart:
sbrtbacknormtemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
sbrtbacknormtemp[i, :, m] = tdpy.retr_cart(gmod.sbrtbacknorm[c][i, :, m], \
numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()
gmod.sbrtbacknorm[c] = sbrtbacknormtemp
# determine spatially uniform background templates
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
if np.std(gmod.sbrtbacknorm[c][i, :, m]) > 1e-6:
gmod.boolunifback[c] = False
boolzero = True
gmod.boolbfun = False
for c in gmod.indxback:
if np.amin(gmod.sbrtbacknorm[c]) < 0. and isinstance(gmod.typeback[c], str) and not gmod.typeback[c].startswith('bfun'):
booltemp = False
raise Exception('Background templates must be positive-definite everywhere.')
if not np.isfinite(gmod.sbrtbacknorm[c]).all():
raise Exception('Background template is not finite.')
if np.amin(gmod.sbrtbacknorm[c]) > 0. or gmod.typeback[c] == 'data':
boolzero = False
if isinstance(gmod.typeback[c], str) and gmod.typeback[c].startswith('bfun'):
gmod.boolbfun = True
if boolzero and not gmod.boolbfun:
raise Exception('At least one background template must be positive everywhere.')
# temp -- does not take into account dark hosts
gmod.boolhost = gmod.typeemishost != 'none'
# type of PSF evaluation
if gmod.maxmpara.numbelemtotl > 0 and gmod.boolelempsfnanyy:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# the diffuse emission needs to be convolved with the PSF and point sources exist
typeevalpsfn = 'full'
else:
# the background is uniform, so only the point sources need the PSF kernel
typeevalpsfn = 'kern'
else:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# the background is convolved by a kernel, no point source exists
typeevalpsfn = 'conv'
else:
# the background is not convolved by a kernel, no point source exists
typeevalpsfn = 'none'
setp_varb(gdat, 'typeevalpsfn', valu=typeevalpsfn, strgmodl=strgmodl)
if gdat.typeverb > 1:
print('gmod.typeevalpsfn')
print(gmod.typeevalpsfn)
gmod.boolapplpsfn = gmod.typeevalpsfn != 'none'
### PSF model
if gmod.typeevalpsfn != 'none':
if gmod.typemodlpsfn == 'singgaus':
numbpsfpform = 1
elif gmod.typemodlpsfn == 'singking':
numbpsfpform = 2
elif gmod.typemodlpsfn == 'doubgaus':
numbpsfpform = 3
elif gmod.typemodlpsfn == 'gausking':
numbpsfpform = 4
elif gmod.typemodlpsfn == 'doubking':
numbpsfpform = 5
gmod.numbpsfptotl = numbpsfpform
if gdat.boolpriopsfninfo:
for i in gdat.indxener:
for m in gdat.indxevtt:
meansigc = gmod.psfpexpr[i * gmod.numbpsfptotl + m * gmod.numbpsfptotl * gdat.numbener]
stdvsigc = meansigc * 0.1
setp_varb(gdat, 'sigcen%02devt%d' % (i, m), mean=meansigc, stdv=stdvsigc, lablroot='$\sigma$', scal='gaus', \
strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
meangamc = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 1]
stdvgamc = meangamc * 0.1
setp_varb(gdat, 'gamcen%02devt%d' % (i, m), mean=meangamc, stdv=stdvgamc, strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking':
meansigt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 2]
stdvsigt = meansigt * 0.1
setp_varb(gdat, 'sigten%02devt%d' % (i, m), mean=meansigt, stdv=stdvsigt, strgmodl=strgmodl)
meangamt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 3]
stdvgamt = meangamt * 0.1
setp_varb(gdat, 'gamten%02devt%d' % (i, m), mean=meangamt, stdv=stdvgamt, strgmodl=strgmodl)
meanpsff = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 4]
stdvpsff = meanpsff * 0.1
setp_varb(gdat, 'psffen%02devt%d' % (i, m), mean=meanpsff, stdv=stdvpsff, strgmodl=strgmodl)
else:
if gdat.typeexpr == 'gene':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'ferm':
minmsigm = 0.1
maxmsigm = 10.
if gdat.typeexpr == 'hubb':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'chan':
minmsigm = 0.1 / gdat.anglfact
maxmsigm = 2. / gdat.anglfact
minmgamm = 1.5
maxmgamm = 20.
setp_varb(gdat, 'sigc', minm=minmsigm, maxm=maxmsigm, lablroot='$\sigma_c$', ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'sigt', minm=minmsigm, maxm=maxmsigm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamc', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamt', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'psff', minm=0., maxm=1., ener='full', evtt='full', strgmodl=strgmodl)
# background
## number of background parameters
numbbacp = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
numbbacp += 1
else:
numbbacp += gdat.numbener
## background parameter indices
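# each spectrally-tied background (boolspecback) gets a single normalization parameter,
# while the others get one normalization per energy bin, matching the count in numbbacp above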
gmod.indxbackbacp = np.zeros(numbbacp, dtype=int)
indxenerbacp = np.zeros(numbbacp, dtype=int)
cntr = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
gmod.indxbackbacp[cntr] = c
cntr += 1
else:
for i in gdat.indxener:
indxenerbacp[cntr] = i
gmod.indxbackbacp[cntr] = c
cntr += 1
# indices of background parameters for each background component
gmod.indxbacpback = [[] for c in gmod.indxback]
for c in gmod.indxback:
gmod.indxbacpback[c] = np.where((gmod.indxbackbacp == c))[0]
# list of names of diffuse components
gmod.listnamediff = []
for c in gmod.indxback:
gmod.listnamediff += ['back%04d' % c]
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
gmod.listnamediff += ['hostisf%d' % e]
if gmod.boollens:
gmod.listnamediff += ['lens']
# list of names of emission components
listnameecom = deepcopy(gmod.listnamediff)
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
if strgmodl == 'true' and gmod.numbelem[l] > 0 or strgmodl == 'fitt' and gmod.maxmpara.numbelem[l] > 0:
if not 'dfnc' in listnameecom:
listnameecom += ['dfnc']
if not 'dfncsubt' in listnameecom:
listnameecom += ['dfncsubt']
gmod.listnameecomtotl = listnameecom + ['modl']
for c in gmod.indxback:
setp_varb(gdat, 'cntpback%04d' % c, lablroot='$C_{%d}$' % c, minm=1., maxm=100., scal='logt', strgmodl=strgmodl)
gmod.listnamegcom = deepcopy(gmod.listnameecomtotl)
if gmod.boollens:
gmod.listnamegcom += ['bgrd']
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
gmod.listnamegcom += ['bgrdgalx', 'bgrdexts']
numbdiff = len(gmod.listnamediff)
convdiff = np.zeros(numbdiff, dtype=bool)
for k, namediff in enumerate(gmod.listnamediff):
if not (gdat.boolthindata or gmod.typeevalpsfn == 'none' or gmod.typeevalpsfn == 'kern'):
if namediff.startswith('back'):
indx = int(namediff[-4:])
convdiff[k] = not gmod.boolunifback[indx]
else:
convdiff[k] = True
# element parameters that correlate with the statistical significance of the element
gmod.namepara.elemsign = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.elemsign[l] = 'flux'
if gmod.typeelem[l] == 'lens':
gmod.namepara.elemsign[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
gmod.namepara.elemsign[l] = 'nobj'
if gdat.typeverb > 0:
if strgmodl == 'true':
strgtemp = 'true'
if strgmodl == 'fitt':
strgtemp = 'fitting'
print('Building elements for the %s model...' % strgtemp)
# define the names and scalings of element parameters
gmod.namepara.genrelem = [[] for l in gmod.indxpopl]
gmod.listscalparagenrelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] = ['elin']
gmod.listscalparagenrelem[l] = ['logt']
elif gmod.typespatdist[l] == 'diskscal':
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'dexp']
elif gmod.typespatdist[l] == 'gangexpo':
gmod.namepara.genrelem[l] = ['gang', 'aang']
gmod.listscalparagenrelem[l] = ['expo', 'self']
elif gmod.typespatdist[l] == 'glc3':
gmod.namepara.genrelem[l] = ['dglc', 'thet', 'phii']
gmod.listscalparagenrelem[l] = ['powr', 'self', 'self']
else:
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'self']
# amplitude
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['lum0']
gmod.listscalparagenrelem[l] += ['dpowslopbrek']
elif gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['per0']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
elif gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['flux']
gmod.listscalparagenrelem[l] += [gmod.typeprioflux[l]]
elif gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['defs']
gmod.listscalparagenrelem[l] += ['powr']
elif gmod.typeelem[l].startswith('clus'):
gmod.namepara.genrelem[l] += ['nobj']
gmod.listscalparagenrelem[l] += ['powr']
# shape
if gmod.typeelem[l] == 'lghtgausbgrd' or gmod.typeelem[l] == 'clusvari':
gmod.namepara.genrelem[l] += ['gwdt']
gmod.listscalparagenrelem[l] += ['powr']
if gmod.typeelem[l] == 'lghtlinevoig':
gmod.namepara.genrelem[l] += ['sigm']
gmod.listscalparagenrelem[l] += ['logt']
gmod.namepara.genrelem[l] += ['gamm']
gmod.listscalparagenrelem[l] += ['logt']
# others
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['magf']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
gmod.namepara.genrelem[l] += ['geff']
gmod.listscalparagenrelem[l] += ['self']
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['dlos']
gmod.listscalparagenrelem[l] += ['powr']
if gdat.numbener > 1 and gmod.typeelem[l].startswith('lghtpnts'):
if gmod.spectype[l] == 'colr':
for i in gdat.indxener:
if i == 0:
continue
gmod.namepara.genrelem[l] += ['sindcolr%04d' % i]
gmod.listscalparagenrelem[l] += ['self']
else:
gmod.namepara.genrelem[l] += ['sind']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'curv':
gmod.namepara.genrelem[l] += ['curv']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'expc':
gmod.namepara.genrelem[l] += ['expc']
gmod.listscalparagenrelem[l] += ['self']
if gmod.typeelem[l] == 'lens':
if gdat.variasca:
gmod.namepara.genrelem[l] += ['asca']
gmod.listscalparagenrelem[l] += ['self']
if gdat.variacut:
gmod.namepara.genrelem[l] += ['acut']
gmod.listscalparagenrelem[l] += ['self']
# names of element parameters for each scaling
gmod.namepara.genrelemscal = [{} for l in gmod.indxpopl]
for l in gmod.indxpopl:
for scaltype in gdat.listscaltype:
gmod.namepara.genrelemscal[l][scaltype] = []
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if scaltype == gmod.listscalparagenrelem[l][k]:
gmod.namepara.genrelemscal[l][scaltype].append(nameparagenrelem)
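# for example, a population whose flux has a power-law prior would end up with
# gmod.namepara.genrelemscal[l]['powr'] containing 'flux' (illustrative; depends on the configuration)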
# variables whose marginal distributions and pair-correlations will be plotted
gmod.namepara.derielemodim = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.derielemodim[l] = deepcopy(gmod.namepara.genrelem[l])
gmod.namepara.derielemodim[l] += ['deltllik']
if gdat.boolbinsspat:
if not 'lgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['lgal']
if not 'bgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['bgal']
if not 'gang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['gang']
if not 'aang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['aang']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.derielemodim[l] += ['cnts']
if gdat.typeexpr == 'ferm':
gmod.namepara.derielemodim[l] += ['sbrt0018']
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.derielemodim[l] += ['reds']
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
gmod.namepara.derielemodim[l] += ['mass']
gmod.namepara.derielemodim[l] += ['dlos']
if gmod.typeelem[l] == 'lens':
gmod.namepara.derielemodim[l] += ['mcut', 'diss', 'rele', 'reln', 'relk', 'relf', 'relm', 'reld', 'relc']
#for k in range(len(gmod.namepara.derielemodim[l])):
# gmod.namepara.derielemodim[l][k] += 'pop%d' % l
# check later
# temp
#if strgmodl == 'fitt':
# for q in gdat.indxrefr:
# if gmod.nameparagenrelemampl[l] in gdat.refr.namepara.elem[q]:
# gmod.namepara.derielemodim[l].append('aerr' + gdat.listnamerefr[q])
if gdat.typeverb > 1:
print('gmod.namepara.derielemodim')
print(gmod.namepara.derielemodim)
# derived element parameters
gmod.namepara.derielem = gmod.namepara.derielemodim[:]
if gdat.typeverb > 1:
print('gmod.namepara.derielem')
print(gmod.namepara.derielem)
# derived parameters
gmod.listnameparaderitotl = [temptemp for temp in gmod.namepara.derielem for temptemp in temp]
#gmod.listnameparaderitotl += gmod.namepara.scal
for namediff in gmod.listnamediff:
gmod.listnameparaderitotl += ['cntp' + namediff]
if gdat.typeverb > 1:
print('gmod.listnameparaderitotl')
print(gmod.listnameparaderitotl)
if strgmodl == 'fitt':
# add reference element parameters that are not available in the fitting model
gdat.refr.namepara.elemonly = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
gmod.namepara.extrelem = [[] for l in gmod.indxpopl]
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
for name in gdat.refr.namepara.elem[q]:
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and (name == 'defs' or name == 'acut' or name == 'asca' or name == 'mass'):
continue
if gmod.typeelem[l] == ('lens') and (name == 'cnts' or name == 'flux' or name == 'spec' or name == 'sind'):
continue
if not name in gmod.namepara.derielemodim[l]:
nametotl = name + gdat.listnamerefr[q]
if name == 'etag':
continue
gmod.namepara.derielemodim[l].append(nametotl)
if gdat.refr.numbelem[q] == 0:
continue
gdat.refr.namepara.elemonly[q][l].append(name)
if not nametotl in gmod.namepara.extrelem[l]:
gmod.namepara.extrelem[l].append(nametotl)
#if name == 'reds':
# for nametemp in ['lumi', 'dlos']:
# nametemptemp = nametemp + gdat.listnamerefr[q]
# if not nametemptemp in gmod.namepara.extrelem[l]:
# gmod.namepara.derielemodim[l].append(nametemp + gdat.listnamerefr[q])
# gmod.namepara.extrelem[l].append(nametemptemp)
if gdat.typeverb > 1:
print('gdat.refr.namepara.elemonly')
print(gdat.refr.namepara.elemonly)
if gdat.typeexpr == 'chan' and gdat.typedata == 'inpt':
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpnts':
gmod.namepara.extrelem[l].append('lumiwo08')
gmod.namepara.derielemodim[l].append('lumiwo08')
if gdat.typeverb > 1:
print('gmod.namepara.extrelem')
print(gmod.namepara.extrelem)
# defaults
gmod.liststrgpdfnmodu = [[] for l in gmod.indxpopl]
gmod.namepara.genrelemmodu = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
if gdat.typeexpr == 'ferm' and gdat.lgalcntr == 0.:
if l == 1:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
if l == 2:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for liststrg in [gmod.namepara.genrelem[l], gmod.namepara.derielemodim[l]]:
for strgthis in liststrg:
if not strgthis in gmod.namepara.elem[l]:
gmod.namepara.elem[l].append(strgthis)
# temp
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] += ['spec']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['spec', 'specplot']
if gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['deflprof']
#gmod.namepara.genrelemeval = [[] for l in gmod.indxpopl]
#for l in gmod.indxpopl:
# if gmod.typeelem[l].startswith('clus'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'nobj']
# if gmod.typeelem[l] == 'clusvari':
# gmod.namepara.genrelemeval[l] += ['gwdt']
# if gmod.typeelem[l] == 'lens':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'defs', 'asca', 'acut']
# if gmod.typeelem[l].startswith('lghtline'):
# gmod.namepara.genrelemeval[l] = ['elin', 'spec']
# elif gmod.typeelem[l] == 'lghtgausbgrd':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'gwdt', 'spec']
# elif gmod.typeelem[l].startswith('lght'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'spec']
## element legends
lablpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
lablpopl[l] = 'FPS'
if gmod.typeelem[l] == 'lghtgausbgrd':
lablpopl[l] = 'BGS'
else:
if gmod.typeelem[l] == 'lghtpntspuls':
lablpopl[l] = 'Pulsar'
elif gmod.typeelem[l].startswith('lghtpntsagnn'):
lablpopl[l] = 'AGN'
elif gmod.typeelem[l].startswith('lghtpnts'):
lablpopl[l] = 'PS'
if gmod.typeelem[l] == 'lens':
lablpopl[l] = 'Subhalo'
if gmod.typeelem[l].startswith('clus'):
lablpopl[l] = 'Cluster'
if gmod.typeelem[l].startswith('lghtline'):
lablpopl[l]= 'Line'
setp_varb(gdat, 'lablpopl', valu=lablpopl, strgmodl=strgmodl)
if strgmodl == 'true':
gmod.indxpoplassc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.numbpopl == 3 and gmod.typeelem[1] == 'lens':
gmod.indxpoplassc[l] = [l]
else:
gmod.indxpoplassc[l] = gmod.indxpopl
# variables for which two dimensional histograms will be calculated
gmod.namepara.genrelemcorr = [[] for l in gmod.indxpopl]
if gdat.boolplotelemcorr:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.derielemodim[l]:
gmod.namepara.genrelemcorr[l].append(strgfeat)
# number of element parameters
if gmod.numbpopl > 0:
gmod.numbparagenrelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcuml = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcumr = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
# number of generative element parameters for a single element of a specific population
gmod.numbparagenrelemsing[l] = len(gmod.namepara.genrelem[l])
# number of derived element parameters for a single element of a specific population
gmod.numbparaderielemsing[l] = len(gmod.namepara.derielem[l])
# number of element parameters for a single element of a specific population
gmod.numbparaelemsing[l] = len(gmod.namepara.elem[l])
# number of generative element parameters for all elements of a specific population
gmod.numbparagenrelem[l] = gmod.numbparagenrelemsing[l] * gmod.maxmpara.numbelem[l]
# number of generative element parameters up to the beginning of a population
gmod.numbparagenrelemcuml[l] = np.sum(gmod.numbparagenrelem[:l])
# number of generative element parameters up to the end of a population
gmod.numbparagenrelemcumr[l] = np.sum(gmod.numbparagenrelem[:l+1])
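# illustrative example (not from a specific run): with two populations having 3 and 4 generative
# parameters per element and at most 100 and 50 elements, numbparagenrelem = [300, 200],
# numbparagenrelemcuml = [0, 300] and numbparagenrelemcumr = [300, 500]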
# number of derived element parameters for all elements of a specific population
gmod.numbparaderielem[l] = gmod.numbparaderielemsing[l] * gmod.numbelem[l]
# number of element parameters for all elements of a specific population
gmod.numbparaelem[l] = gmod.numbparaelemsing[l] * gmod.numbelem[l]
# number of generative element parameters summed over all populations
gmod.numbparagenrelemtotl = np.sum(gmod.numbparagenrelem)
# number of derived element parameters summed over all populations
gmod.numbparaderielemtotl = np.sum(gmod.numbparaderielem)
# number of element parameters summed over all populations
gmod.numbparaelemtotl = np.sum(gmod.numbparaelem)
gmod.indxparagenrelemsing = []
for l in gmod.indxpopl:
gmod.indxparagenrelemsing.append(np.arange(gmod.numbparagenrelemsing[l]))
gmod.indxparaderielemsing = []
for l in gmod.indxpopl:
gmod.indxparaderielemsing.append(np.arange(gmod.numbparaderielemsing[l]))
gmod.indxparaelemsing = []
for l in gmod.indxpopl:
gmod.indxparaelemsing.append(np.arange(gmod.numbparaelemsing[l]))
# size of the auxiliary variable probability density vector
if gmod.maxmpara.numbelemtotl > 0:
gmod.numblpri = 3 + gmod.numbparagenrelem * gmod.numbpopl
else:
gmod.numblpri = 0
if gdat.penalpridiff:
gmod.numblpri += 1
indxlpri = np.arange(gmod.numblpri)
# append the population tags to element parameter names
#for l in gmod.indxpopl:
# gmod.namepara.genrelem[l] = [gmod.namepara.genrelem[l][g] + 'pop%d' % l for g in gmod.indxparagenrelemsing[l]]
gmod.boolcompposi = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.boolcompposi[l] = np.zeros(gmod.numbparagenrelemsing[l], dtype=bool)
if gmod.typeelem[l].startswith('lghtline'):
gmod.boolcompposi[l][0] = True
else:
gmod.boolcompposi[l][0] = True
gmod.boolcompposi[l][1] = True
# list of strings across all populations
## all (generative and derived) element parameters
gmod.numbparaelem = len(gmod.namepara.elem)
gmod.indxparaelem = np.arange(gmod.numbparaelem)
# flattened list of generative element parameters
gmod.listnameparagenfelem = []
for l in gmod.indxpopl:
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmod.listnameparagenfelem.append(nameparagenrelem + 'pop%d' % l)
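# e.g., for a point-source population this flattened list would contain entries like
# 'lgalpop0', 'bgalpop0', 'fluxpop0' (illustrative)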
# concatenated list of flattened generative and derived element parameters
gmod.listnameparatotlelem = gmod.listnameparagenfelem + gmod.namepara.derielem
gmod.numbparaelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.numbparaelem[l] = len(gmod.namepara.elem[l])
numbdeflsubhplot = 2
numbdeflsingplot = numbdeflsubhplot
if gmod.numbparaelem > 0:
numbdeflsingplot += 3
gmod.convdiffanyy = True in convdiff
cntr = tdpy.cntr()
if gmod.boollens:
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
massfrombein = retr_massfrombein(gdat, adissour, adishost, adishostsour)
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
# object of parameter indices
gmod.indxpara = tdpy.gdatstrt()
# define parameter indices
if gmod.numbparaelem > 0:
# number of elements
#gmod.indxpara.numbelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
indx = cntr.incr()
setattr(gmod.indxpara, 'numbelempop%d' % l, indx)
#gmod.indxpara.numbelem[l] = indx
# hyperparameters
## mean number of elements
if gmod.typemodltran == 'pois':
#gmod.indxpara.meanelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
indx = cntr.incr()
setattr(gmod.indxpara, 'meanelempop%d' % l, indx)
#gmod.indxpara.meanelem[l] = indx
## parameters parametrizing priors on element parameters
liststrgvarb = []
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
for strgpdfnelemgenr, strgfeat in zip(gmod.listscalparagenrelem[l], gmod.namepara.genrelem[l]):
if strgpdfnelemgenr == 'expo' or strgpdfnelemgenr == 'dexp':
liststrgvarb += [strgfeat + 'distscal']
if strgpdfnelemgenr == 'powr':
liststrgvarb += ['slopprio' + strgfeat + 'pop%d' % l]
if strgpdfnelemgenr == 'dpow':
liststrgvarb += [strgfeat + 'distbrek']
liststrgvarb += [strgfeat + 'sloplowr']
liststrgvarb += [strgfeat + 'slopuppr']
if strgpdfnelemgenr == 'gausmean' or strgpdfnelemgenr == 'lnormean':
liststrgvarb += [strgfeat + 'distmean']
if strgpdfnelemgenr == 'gausstdv' or strgpdfnelemgenr == 'lnorstdv':
liststrgvarb += [strgfeat + 'diststdv']
if strgpdfnelemgenr == 'gausmeanstdv' or strgpdfnelemgenr == 'lnormeanstdv':
liststrgvarb += [strgfeat + 'distmean', strgfeat + 'diststdv']
for strgvarb in liststrgvarb:
setattr(gmod.indxpara, strgvarb, np.zeros(gmod.numbpopl, dtype=int) - 1)
for l in gmod.indxpopl:
strgpopl = 'pop%d' % l
if gmod.maxmpara.numbelem[l] > 0:
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if gmod.listscalparagenrelem[l][k] == 'self':
continue
indx = cntr.incr()
if gmod.listscalparagenrelem[l][k] == 'dpow':
for nametemp in ['brek', 'sloplowr', 'slopuppr']:
strg = '%s' % nametemp + nameparagenrelem
setattr(gmod.indxpara, strg, indx)
else:
if gmod.listscalparagenrelem[l][k] == 'expo' or gmod.listscalparagenrelem[l][k] == 'dexp':
strghypr = 'scal'
if gmod.listscalparagenrelem[l][k] == 'powr':
strghypr = 'slop'
if gmod.listscalparagenrelem[l][k] == 'gausmean' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
gmod.listscalparagenrelem[l][k] == 'lnormean' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
strghypr = 'mean'
if gmod.listscalparagenrelem[l][k] == 'gausstdv' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
gmod.listscalparagenrelem[l][k] == 'lnorstdv' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
strghypr = 'stdv'
strg = strghypr + 'prio' + nameparagenrelem + 'pop%d' % l
setattr(gmod.indxpara, strg, indx)
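# the resulting attribute names follow the pattern <hypr>prio<paramname>pop<l>,
# e.g., 'sloppriofluxpop0' for the slope of a power-law flux prior (illustrative)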
# group PSF parameters
if gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full':
for m in gdat.indxevtt:
for i in gdat.indxener:
setattr(gmod.indxpara, 'sigcen%02devt%d' % (i, m), cntr.incr())
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
setattr(gmod.indxpara, 'gamcen%02devt%d' % (i, m), cntr.incr())
if gmod.typemodlpsfn == 'doubking':
setattr(gmod.indxpara, 'sigten%02devt%d' % (i, m), cntr.incr())
setattr(gmod.indxpara, 'gamten%02devt%d' % (i, m), cntr.incr())
setattr(gmod.indxpara, 'psffen%02devt%d' % (i, m), cntr.incr())
gmod.indxpara.psfp = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith('sigce') or strg.startswith('sigte') or strg.startswith('gamce') or strg.startswith('gamte') or strg.startswith('psffe'):
gmod.indxpara.psfp.append(valu)
gmod.indxpara.psfp = np.array(gmod.indxpara.psfp)
gmod.numbpsfptotlevtt = gdat.numbevtt * gmod.numbpsfptotl
gmod.numbpsfptotlener = gdat.numbener * gmod.numbpsfptotl
numbpsfp = gmod.numbpsfptotl * gdat.numbener * gdat.numbevtt
indxpsfpform = np.arange(numbpsfpform)
indxpsfptotl = np.arange(gmod.numbpsfptotl)
indxpsfp = np.arange(numbpsfp)
gmod.indxpara.psfp = np.sort(gmod.indxpara.psfp)
gmod.indxparapsfpinit = gmod.indxpara.psfp[0]
# group background parameters
gmod.indxpara.bacp = []
for c in gmod.indxback:
if gmod.boolspecback[c]:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04d' % c, indx)
gmod.indxpara.bacp.append(indx)
else:
for i in gdat.indxener:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04den%02d' % (c, i), indx)
gmod.indxpara.bacp.append(indx)
gmod.indxpara.bacp = np.array(gmod.indxpara.bacp)
# temp
#gmod.indxpara.anglsour = []
#gmod.indxpara.anglhost = []
#gmod.indxpara.angllens = []
if gmod.typeemishost != 'none':
gmod.indxpara.specsour = []
gmod.indxpara.spechost = []
if gmod.boollens:
gmod.indxpara.lgalsour = cntr.incr()
gmod.indxpara.bgalsour = cntr.incr()
gmod.indxpara.fluxsour = cntr.incr()
if gdat.numbener > 1:
gmod.indxpara.sindsour = cntr.incr()
gmod.indxpara.sizesour = cntr.incr()
gmod.indxpara.ellpsour = cntr.incr()
gmod.indxpara.anglsour = cntr.incr()
if gmod.typeemishost != 'none' or gmod.boollens:
for e in gmod.indxsersfgrd:
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'lgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'bgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'fluxhostisf%d' % e, cntr.incr())
if gdat.numbener > 1:
setattr(gmod.indxpara, 'sindhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'sizehostisf%d' % e, cntr.incr())
if gmod.boollens:
setattr(gmod.indxpara, 'beinhostisf%d' % e, cntr.incr())
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'ellphostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'anglhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'serihostisf%d' % e, cntr.incr())
if gmod.boollens:
gmod.indxpara.sherextr = cntr.incr()
gmod.indxpara.sangextr = cntr.incr()
gmod.indxpara.sour = []
if gmod.boollens and gmod.typeemishost == 'none':
raise Exception('Lensing cannot be modeled without host galaxy emission.')
# collect groups of parameters
if gdat.typeexpr == 'hubb':
gmod.listnamecomplens = ['hostlght', 'hostlens', 'sour', 'extr']
for namecomplens in gmod.listnamecomplens:
setattr(gmod, 'liststrg' + namecomplens, [])
setattr(gmod.indxpara, namecomplens, [])
if gmod.boollens or gmod.typeemishost != 'none':
gmod.liststrghostlght += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
gmod.liststrghostlens += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
if gmod.typeemishost != 'none':
gmod.liststrghostlght += ['fluxhost', 'sizehost', 'serihost']
if gdat.numbener > 1:
gmod.liststrghostlght += ['sindhost']
if gmod.boollens:
gmod.liststrghostlens += ['beinhost']
gmod.liststrgextr += ['sherextr', 'sangextr']
gmod.liststrgsour += ['lgalsour', 'bgalsour', 'fluxsour', 'sizesour', 'ellpsour', 'anglsour']
if gdat.numbener > 1:
gmod.liststrgsour += ['sindsour']
for strg, valu in gmod.__dict__.items():
if isinstance(valu, list) or isinstance(valu, np.ndarray):
continue
if gdat.typeexpr == 'hubb':
for namecomplens in gmod.listnamecomplens:
for strgtemp in getattr(gmod, 'liststrg' + namecomplens):
if strg[12:].startswith(strgtemp):
if isinstance(valu, list):
for valutemp in valu:
getattr(gmod.indxpara, namecomplens).append(valutemp)
else:
getattr(gmod.indxpara, namecomplens).append(valu)
# remove indxpara. from strg
strg = strg[12:]
if strg.startswith('fluxsour') or strg.startswith('sindsour'):
gmod.indxpara.specsour.append(valu)
if strg.startswith('fluxhost') or strg.startswith('sindhost'):
gmod.indxpara.spechost.append(valu)
if gmod.boollens or gmod.boolhost:
gmod.indxpara.host = gmod.indxpara.hostlght + gmod.indxpara.hostlens
gmod.indxpara.lens = gmod.indxpara.host + gmod.indxpara.sour + gmod.indxpara.extr
## number of model spectral parameters for each population
#numbspep = np.empty(gmod.numbpopl, dtype=int)
#liststrgspep = [[] for l in range(gmod.numbpopl)]
#for l in gmod.indxpopl:
# if gdat.numbener > 1:
# liststrgspep[l] += ['sind']
# if gmod.spectype[l] == 'expc':
# liststrgspep[l] += ['expc']
# if gmod.spectype[l] == 'curv':
# liststrgspep[l] = ['curv']
# numbspep[l] = len(liststrgspep[l])
def setp_paragenrscalbase(gdat, strgmodl='fitt'):
'''
Setup labels and scales for base parameters
'''
print('setp_paragenrscalbase(): Building the %s model base parameter names and scales...' % strgmodl)
gmod = getattr(gdat, strgmodl)
listlablback = []
for nameback in gmod.listnameback:
if nameback == 'isot':
listlablback.append('Isotropic')
listlablback.append(r'$\mathcal{I}$')
if nameback == 'fdfm':
listlablback.append('FDM')
listlablback.append(r'$\mathcal{D}$')
if nameback == 'dark':
listlablback.append('NFW')
listlablback.append(r'$\mathcal{D}_{dark}$')
if nameback == 'part':
listlablback.append('Particle Back.')
listlablback.append(r'$\mathcal{I}_p$')
# background templates
listlablsbrt = deepcopy(listlablback)
numblablsbrt = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
listlablsbrt.append(gmod.lablpopl[l])
listlablsbrt.append(gmod.lablpopl[l] + ' subt')
numblablsbrt += 2
if gmod.boollens:
listlablsbrt.append('Source')
numblablsbrt += 1
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
listlablsbrt.append('Host %d' % e)
numblablsbrt += 1
if gmod.numbpopl > 0:
if 'clus' in gmod.typeelem or 'clusvari' in gmod.typeelem:
listlablsbrt.append('Uniform')
numblablsbrt += 1
listlablsbrtspec = ['Data']
listlablsbrtspec += deepcopy(listlablsbrt)
if len(listlablsbrt) > 1:
listlablsbrtspec.append('Total Model')
numblablsbrtspec = len(listlablsbrtspec)
# number of generative parameters per element, depends on population
#numbparaelem = gmod.numbparagenrelem + numbparaelemderi
# maximum total number of parameters
#numbparagenrfull = gmod.numbparagenrbase + gmod.numbparaelem
#numbparaelemkind = gmod.numbparagenrbase
#for l in gmod.indxpopl:
# numbparaelemkind += gmod.numbparagenrelemsing[l]
#nameparagenrbase
#gmod.namepara.genrelem
#listnameparaderifixd
#listnameparaderielem
#gmod.namepara.genrelemextd = gmod.namepara.genrelem * maxm.numbelem
#listnameparaderielemextd = gmod.namepara.genrelem * maxm.numbelem
gmod.listindxparakindscal = {}
for scaltype in gdat.listscaltype:
gmod.listindxparakindscal[scaltype] = np.where(scaltype == gmod.listscalparakind)[0]
#
## stack
## gmod.listnameparastck
#gmod.listnameparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#gmod.listscalparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#
#gmod.listnameparastck[gmod.indxparagenrbase] = gmod.nameparagenrbase
#gmod.listscalparastck[gmod.indxparagenrbase] = gmod.listscalparagenrbase
#for k in range(gmod.numbparaelem):
# for l in gmod.indxpopl:
# if k >= gmod.numbparagenrelemcuml[l]:
# indxpopltemp = l
# indxelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) // gmod.numbparagenrelemsing[indxpopltemp]
# gmod.indxparagenrelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) % gmod.numbparagenrelemsing[indxpopltemp]
# break
# gmod.listnameparastck[gmod.numbparagenrbase+k] = '%spop%d%04d' % (gmod.namepara.genrelem[indxpopltemp][gmod.indxparagenrelemtemp], indxpopltemp, indxelemtemp)
# gmod.listscalparastck[gmod.numbparagenrbase+k] = gmod.listscalparagenrelem[indxpopltemp][gmod.indxparagenrelemtemp]
#
#
#if np.where(gmod.listscalpara == 0)[0].size > 0:
# print('gmod.listscalpara[gmod.indxparagenrbase]')
# print(gmod.listscalpara[gmod.indxparagenrbase])
# raise Exception('')
#
## labels and scales for variables
if gmod.boollens:
setattr(gmod.lablrootpara, 'masssubhintg', r'$M_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhdelt', r'$\rho_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhintgbein', r'$M_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhdeltbein', r'$\rho_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhintgunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'masssubhintgbeinunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltbeinunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'fracsubhintg', r'f_{\rm{sub}}')
setattr(gmod.lablrootpara, 'fracsubhdelt', r'f_{\rho,\rm{sub}}')
setattr(gmod.lablrootpara, 'fracsubhintgbein', r'$f_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'fracsubhdeltbein', r'$f_{\rho,\rm{sub,E}}$')
for e in gmod.indxsersfgrd:
setattr(gmod.lablrootpara, 'masshostisf%dbein' % e, r'$M_{\rm{hst,%d,C}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintg' % e, r'$M_{\rm{hst,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddelt' % e, r'$M_{\rm{hst,%d}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintgbein' % e, r'$M_{\rm{hst,E,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddeltbein' % e, r'$M_{\rm{hst,E,%d}}$' % e)
for namevarb in ['fracsubh', 'masssubh']:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scal' + namevarb + strgcalcmasssubh + nameeval, 'logt')
for e in gmod.indxsersfgrd:
setattr(gdat, 'scalmasshostisf%d' % e + 'bein', 'logt')
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scalmasshostisf%d' % e + strgcalcmasssubh + nameeval, 'logt')
# scalar variable setup
gdat.lablhistcntplowrdfncsubten00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncsubten00evt0 = 'N_{pix,h}'
gdat.lablhistcntplowrdfncen00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncen00evt0 = 'N_{pix,h}'
gdat.lablbooldfncsubt = 'H'
gdat.lablpriofactdoff = r'$\alpha_{p}$'
gmod.scalpriofactdoff = 'self'
gdat.minmreds = 0.
gdat.maxmreds = 1.5
gdat.minmmagt = 19.
gdat.maxmmagt = 28.
gmod.scalpara.numbelem = 'logt'
gmod.scalpara.lliktotl = 'logt'
gdat.lablener = 'E'
#gdat.lablenertotl = '$%s$ [%s]' % (gdat.lablener, gdat.strgenerunit)
# width of the Gaussian clusters
gdat.lablgwdt = r'\sigma_G'
gdat.lablgang = r'\theta'
gdat.lablaang = r'\phi'
gdat.labllgalunit = gdat.lablgangunit
gdat.lablbgalunit = gdat.lablgangunit
gdat.lablanglfromhost = r'\theta_{\rm{0,hst}}'
gdat.lablanglfromhostunit = gdat.lablgangunit
gdat.labldefs = r'\alpha_s'
gdat.lablflux = 'f'
gdat.lablnobj = 'p'
gdat.lablelin = r'\mathcal{E}'
gdat.lablsbrt = r'\Sigma'
gdat.labldeflprof = r'\alpha_a'
gdat.labldeflprofunit = u'$^{\prime\prime}$'
gdat.strgenerkevv = 'keV'
gdat.strgenergevv = 'GeV'
gdat.strgenerergs = 'erg'
gdat.strgenerimum = '\mu m^{-1}'
gdat.labldefsunit = u'$^{\prime\prime}$'
gdat.lablprat = 'cm$^{-2}$ s$^{-1}$'
### labels for derived fixed dimensional parameters
if gdat.boolbinsener:
for i in gdat.indxener:
setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubten%02d' % i, 'f_{D/ST,%d}' % i)
else:
gmod.lablrootpara.fracsdenmeandarkdfncsubt = 'f_{D/ST}'
### labels for background units
if gdat.typeexpr == 'ferm':
for nameenerscaltype in ['en00', 'en01', 'en02', 'en03']:
for labltemptemp in ['flux', 'sbrt']:
# define the label
if nameenerscaltype == 'en00':
strgenerscal = '%s' % labltemptemp
if nameenerscaltype == 'en01':
strgenerscal = 'E%s' % labltemptemp
if nameenerscaltype == 'en02':
strgenerscal = 'E^2%s' % labltemptemp
if nameenerscaltype == 'en03':
strgenerscal = '%s' % labltemptemp
labl = '%s' % strgenerscal
for nameenerunit in ['gevv', 'ergs', 'kevv', 'imum']:
strgenerunit = getattr(gdat, 'strgener' + nameenerunit)
if nameenerscaltype == 'en00':
strgenerscalunit = '%s$^{-1}$' % strgenerunit
if nameenerscaltype == 'en01':
strgenerscalunit = ''
if nameenerscaltype == 'en02':
strgenerscalunit = '%s' % strgenerunit
if nameenerscaltype == 'en03':
strgenerscalunit = '%s' % strgenerunit
# define the label unit
for namesoldunit in ['ster', 'degr']:
if labltemptemp == 'flux':
lablunit = '%s %s' % (strgenerscalunit, gdat.lablprat)
setattr(gmod.lablunitpara, 'flux' + nameenerscaltype + nameenerunit + 'unit', lablunit)
else:
if namesoldunit == 'ster':
lablunit = '%s %s sr$^{-1}$' % (strgenerscalunit, gdat.lablprat)
if namesoldunit == 'degr':
lablunit = '%s %s deg$^{-2}$' % (strgenerscalunit, gdat.lablprat)
setattr(gmod.lablunitpara, 'sbrt' + nameenerscaltype + nameenerunit + namesoldunit + 'unit', lablunit)
if gdat.boolbinsener:
gdat.lablfluxunit = getattr(gmod.lablunitpara, 'fluxen00' + gdat.nameenerunit + 'unit')
gdat.lablsbrtunit = getattr(gmod.lablunitpara, 'sbrten00' + gdat.nameenerunit + 'sterunit')
gdat.lablexpo = r'$\epsilon$'
gdat.lablexpounit = 'cm$^2$ s'
gdat.lablprvl = '$p$'
gdat.lablreds = 'z'
gdat.lablmagt = 'm_R'
gdat.lablper0 = 'P_0'
gmod.scalper0plot = 'logt'
gdat.labldglc = 'd_{gc}'
gmod.scaldglcplot = 'logt'
gdat.labldlos = 'd_{los}'
gmod.scaldlosplot = 'logt'
if gdat.typeexpr == 'ferm':
gdat.labldlosunit = 'kpc'
gdat.labllumi = r'L_{\gamma}'
if gdat.typeexpr == 'chan':
gdat.labldlosunit = 'Mpc'
gdat.labllumi = r'L_{X}'
gdat.labllum0 = r'L_{X, 0}'
gdat.lablgeff = r'\eta_{\gamma}'
gmod.scalgeffplot = 'logt'
gmod.scallumiplot = 'logt'
gdat.labllumiunit = 'erg s$^{-1}$'
gdat.labllum0unit = 'erg s$^{-1}$'
gdat.lablthet = r'\theta_{gc}'
gmod.scalthetplot = 'self'
gdat.lablphii = r'\phi_{gc}'
gmod.scalphiiplot = 'self'
setattr(gmod.lablrootpara, 'magf', 'B')
setattr(gdat, 'scalmagfplot', 'logt')
setattr(gmod.lablrootpara, 'per1', 'P_1')
if gdat.typedata == 'inpt':
gdat.minmpara.per0 = 1e-3
gdat.maxmpara.per0 = 1e1
gdat.minmpara.per1 = 1e-20
gdat.maxmpara.per1 = 1e-10
gdat.minmpara.flux0400 = 1e-1
gdat.maxmpara.flux0400 = 1e4
setattr(gdat, 'scalper1plot', 'logt')
setattr(gmod.lablrootpara, 'flux0400', 'S_{400}')
setattr(gdat, 'scalflux0400plot', 'logt')
for q in gdat.indxrefr:
setattr(gmod.lablrootpara, 'aerr' + gdat.listnamerefr[q], '\Delta_{%d}' % q)
gdat.lablsigm = '\sigma_l'
gdat.lablgamm = '\gamma_l'
gdat.lablbcom = '\eta'
gdat.lablinfopost = 'D_{KL}'
gdat.lablinfopostunit = 'nat'
gdat.lablinfoprio = 'D_{KL,pr}'
gdat.lablinfopriounit = 'nat'
gdat.labllevipost = '\ln P(D)'
gdat.labllevipostunit = 'nat'
gdat.lablleviprio = '\ln P_{pr}(D)'
gdat.labllevipriounit = 'nat'
gdat.lablsind = 's'
if gdat.boolbinsener:
for i in gdat.indxenerinde:
setattr(gmod.lablrootpara, 'sindcolr%04d' % i, 's_%d' % i)
gdat.lablexpcunit = gdat.strgenerunit
gdat.labllliktotl = r'\ln P(D|M)'
gdat.labllpripena = r'\ln P(N)'
gdat.lablasca = r'\theta_s'
gdat.lablascaunit = gdat.lablgangunit
gdat.lablacut = r'\theta_c'
gdat.lablacutunit = gdat.lablgangunit
gdat.lablmcut = r'M_{c,n}'
gdat.lablmcutunit = r'$M_{\odot}$'
gdat.lablmcutcorr = r'\bar{M}_{c,n}'
gdat.lablmcutcorrunit = r'$M_{\odot}$'
gdat.lablspec = gdat.lablflux
gdat.lablspecunit = gdat.lablfluxunit
gdat.lablspecplot = gdat.lablflux
gdat.lablspecplotunit = gdat.lablfluxunit
gdat.lablcnts = 'C'
gdat.labldeltllik = r'\Delta_n \ln P(D|M)'
gdat.labldiss = r'\theta_{sa}'
gdat.labldissunit = gdat.lablgangunit
gdat.lablrele = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_l| \rangle'
gdat.lablrelc = r'\langle\vec{\alpha}_n \cdot \vec{\nabla} k_l \rangle'
gdat.lablreld = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_d| \rangle'
gdat.lablreln = r'\langle \Delta \theta_{pix} |\hat{\alpha}_n \cdot \vec{\nabla} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelm = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelk = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelf = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle / k_m'
for q in gdat.indxrefr:
for l in gmod.indxpopl:
setp_varb(gdat, 'fdispop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$F_{%d%d}$' % (l, q))
setp_varb(gdat, 'cmplpop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$C_{%d%d}$' % (l, q))
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
gdat.minmspec = 1e-2
gdat.maxmspec = 1e1
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
if gdat.typeexpr == 'ferm':
gdat.minmlumi = 1e32
gdat.maxmlumi = 1e36
elif gdat.typeexpr == 'chan':
if gdat.typedata == 'inpt':
gdat.minmlum0 = 1e42
gdat.maxmlum0 = 1e46
gdat.minmlumi = 1e41
gdat.maxmlumi = 1e45
try:
gdat.minmdlos
except:
if gdat.typeexpr == 'chan':
gdat.minmdlos = 1e7
gdat.maxmdlos = 1e9
else:
gdat.minmdlos = 6e3
gdat.maxmdlos = 1.1e4
if gdat.typeexpr == 'ferm':
gdat.minmcnts = 1e1
gdat.maxmcnts = 1e5
if gdat.typeexpr == 'chan':
if gdat.numbpixlfull == 1:
gdat.minmcnts = 1e4
gdat.maxmcnts = 1e8
else:
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'hubb':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'fire':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
gdat.minmspecplot = gdat.minmspec
gdat.maxmspecplot = gdat.maxmspec
gdat.minmdeltllik = 1.
gdat.maxmdeltllik = 1e3
gdat.minmdiss = 0.
gdat.maxmdiss = gdat.maxmgangdata * np.sqrt(2.)
gdat.minmrele = 1e-3
gdat.maxmrele = 1e1
gdat.minmreln = 1e-3
gdat.maxmreln = 1.
gdat.minmrelk = 1e-3
gdat.maxmrelk = 1.
gdat.minmrelf = 1e-5
gdat.maxmrelf = 1e-1
gdat.minmrelm = 1e-3
gdat.maxmrelm = 1e1
gdat.minmreld = 1e-3
gdat.maxmreld = 1e1
gdat.minmrelc = 1e-3
gdat.maxmrelc = 1.
gdat.minmmcut = 3e7
gdat.maxmmcut = 2e9
gdat.minmmcutcorr = gdat.minmmcut
gdat.maxmmcutcorr = gdat.maxmmcut
if gdat.boolbinsspat:
gdat.minmbein = 0.
gdat.maxmbein = 1. / gdat.anglfact
# scalar variables
if gdat.boolbinsspat:
gdat.minmdeflprof = 1e-3 / gdat.anglfact
gdat.maxmdeflprof = 0.1 / gdat.anglfact
#gdat.minmfracsubh = 0.
#gdat.maxmfracsubh = 0.3
#gmod.scalfracsubh = 'self'
#gdat.minmmasshost = 1e10
#gdat.maxmmasshost = 1e13
#gmod.scalmasshost = 'self'
#
#gdat.minmmasssubh = 1e8
#gdat.maxmmasssubh = 1e10
#gmod.scalmasssubh = 'self'
# collect groups of parameter indices into lists
## labels and scales for base parameters
gmod.nameparagenrbase = []
for name, k in gmod.indxpara.__dict__.items():
if not np.isscalar(k):
print('name')
print(name)
print('temp: no nonscalar should be here!')
continue
gmod.nameparagenrbase.append(name)
gmod.numbparagenrbase = len(gmod.nameparagenrbase)
gmod.indxparagenrbase = np.arange(gmod.numbparagenrbase)
gmod.indxparagenrbasestdv = gmod.indxparagenrbase[gmod.numbpopl:]
## list of scalar variable names
gmod.namepara.scal = list(gmod.nameparagenrbase)
gmod.namepara.scal += ['lliktotl']
# derived parameters
print('Determining the list of derived, fixed-dimensional parameter names...')
gmod.namepara.genrelemextd = [[[] for g in gmod.indxparagenrelemsing[l]] for l in gmod.indxpopl]
gmod.namepara.derielemextd = [[[] for k in gmod.indxparaderielemsing[l]] for l in gmod.indxpopl]
gmod.namepara.genrelemflat = []
gmod.namepara.derielemflat = []
gmod.namepara.genrelemextdflat = []
gmod.namepara.derielemextdflat = []
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
gmod.namepara.genrelemflat.append(gmod.namepara.genrelem[l][g] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.genrelemextd[l][g].append(gmod.namepara.genrelem[l][g] + 'pop%d' % l + '%04d' % d)
gmod.namepara.genrelemextdflat.append(gmod.namepara.genrelemextd[l][g][d])
for k in gmod.indxparaderielemsing[l]:
gmod.namepara.derielemflat.append(gmod.namepara.derielem[l][k] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.derielemextd[l][k].append(gmod.namepara.derielem[l][k] + 'pop%d' % l + '%04d' % d)
gmod.namepara.derielemextdflat.append(gmod.namepara.derielemextd[l][k][d])
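# the extended (per-element) names carry both the population and the element index,
# e.g., 'fluxpop00000', 'fluxpop00001', ... (illustrative)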
# list of element parameter names (derived and generative), counting label-degenerate element parameters only once
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.elem[l].extend(gmod.namepara.genrelem[l])
gmod.namepara.elem[l].extend(gmod.namepara.derielem[l])
gmod.namepara.elemflat = []
for l in gmod.indxpopl:
gmod.namepara.elemflat.extend(gmod.namepara.elem[l])
gmod.namepara.genrelemdefa = deepcopy(gmod.namepara.elemflat)
if gmod.boolelemlghtanyy:
for strgfeat in ['sind', 'curv', 'expc'] + ['sindcolr%04d' % i for i in gdat.indxenerinde]:
if not strgfeat in gmod.namepara.genrelemdefa:
gmod.namepara.genrelemdefa.append(strgfeat)
# list of flattened element parameter names (generative and derived), counting label-degenerate element parameters only once
gmod.namepara.genrelemkind = gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparagenrelemkind = len(gmod.namepara.genrelemkind)
#gmod.inxparagenrscalelemkind = np.arange(gmod.numbparagenrelemkind)
gmod.inxparagenrscalelemkind = tdpy.gdatstrt()
gmod.numbparagenrelemextdflat = len(gmod.namepara.genrelemextdflat)
gmod.indxparagenrelemextdflat = np.arange(gmod.numbparagenrelemextdflat)
# list of parameter names (derived and generative), counting label-degenerate element parameters only once, element lists flattened
gmod.namepara.kind = gmod.nameparagenrbase + gmod.listnameparaderitotl + gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparakind = len(gmod.namepara.kind)
gmod.indxparakind = np.arange(gmod.numbparakind)
# list of generative parameter names, separately including all label-degenerate element parameters, element lists flattened
gmod.namepara.genrscalfull = gmod.nameparagenrbase + gmod.namepara.genrelemextdflat
gmod.namepara.genrscalfull = np.array(gmod.namepara.genrscalfull)
gmod.numbparagenrfull = len(gmod.namepara.genrscalfull)
gmod.indxparagenrfull = np.arange(gmod.numbparagenrfull)
# list of generative parameter names, counting label-degenerate element parameters only once, element lists flattened
gmod.listnameparagenrscal = gmod.nameparagenrbase + gmod.namepara.genrelemflat
gmod.numbparagenr = len(gmod.listnameparagenrscal)
gmod.indxparagenr = np.arange(gmod.numbparagenr)
# list of parameter names (derived and generative), element lists flattened
gmod.listnameparatotl = gmod.nameparagenrbase + gmod.listnameparaderitotl + \
gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.nameparagenrbase = np.array(gmod.nameparagenrbase)
for e in gmod.indxsersfgrd:
strgsersfgrd = 'isf%d' % e
gmod.namepara.scal += ['masshost' + strgsersfgrd + 'bein']
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
gmod.namepara.scal += ['masshost' + strgsersfgrd + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
if gmod.boollenssubh:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
gmod.namepara.scal += ['masssubh' + strgcalcmasssubh + 'bein', 'fracsubh' + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
gmod.namepara.scal += ['lpripena']
if False and gmod.boolelemsbrtdfncanyy:
for strgbins in ['lowr', 'higr']:
gmod.namepara.scal += ['histcntp%sdfncen00evt0' % strgbins]
gmod.namepara.scal += ['histcntp%sdfncsubten00evt0' % strgbins]
for i in gdat.indxener:
gmod.namepara.scal += ['fracsdenmeandarkdfncsubten%02d' % i]
gmod.namepara.scal += ['booldfncsubt']
if gmod.numbparaelem > 0:
for q in gdat.indxrefr:
if gdat.boolasscrefr[q]:
for l in gmod.indxpopl:
gmod.namepara.scal += ['cmplpop%dpop%d' % (l, q)]
gmod.namepara.scal += ['fdispop%dpop%d' % (l, q)]
gmod.numbvarbscal = len(gmod.namepara.scal)
gmod.indxvarbscal = np.arange(gmod.numbvarbscal)
# determine total label
gmod.listnameparaglob = gmod.namepara.kind + gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.listnameparaglob += ['cntpmodl']
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
if not gmod.namepara.genrelem[l][g] in gmod.listnameparaglob:
gmod.listnameparaglob.append(gmod.namepara.genrelem[l][g])
gmod.listnameparaglob.append(gmod.namepara.derielem[l][g])
for name in gmod.listnameparaglob:
lablroot = getattr(gmod.lablrootpara, name)
lablunit = getattr(gmod.lablunitpara, name)
labltotl = tdpy.retr_labltotlsing(lablroot, lablunit)
setattr(gmod.labltotlpara, name, labltotl)
# define fact
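# 'fact' is the width of the allowed range: maxm - minm for linear ('self') scalings and
# ln(maxm / minm) for logarithmic ('logt') scalings; presumably used to map parameters
# to and from the unit interval elsewhere in the code (assumption based on usage)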
for l in gmod.indxpopl:
for k in gmod.indxparakind:
name = gmod.namepara.kind[k]
scal = getattr(gmod.scalpara, name)
if scal == 'self' or scal == 'logt':
minm = getattr(gmod.minmpara, name)
maxm = getattr(gmod.maxmpara, name)
if scal == 'self':
fact = maxm - minm
if scal == 'logt':
fact = np.log(maxm / minm)
if fact == 0:
print('name')
print(name)
raise Exception('')
setattr(gmod.factpara, name, fact)
if gmod.numbparaelem > 0:
gmod.indxparagenrfulleleminit = gmod.indxparagenrbase[-1] + 1
else:
gmod.indxparagenrfulleleminit = -1
## arrays of parameter features (e.g., minm, maxm, labl, scal, etc.)
for featpara in gdat.listfeatparalist:
gmodfeat = getattr(gmod, featpara + 'para')
### elements
#for strgtypepara in gdat.liststrgtypepara:
# listname = getattr(gmod.namepara, strgtypepara + 'elem')
# listfeat = [[] for l in gmod.indxpopl]
# listfeatflat = []
# for l in gmod.indxpopl:
#
# numb = getattr(gmod, 'numbpara' + strgtypepara + 'elemsing')[l]
# listfeat[l] = [[] for k in range(numb)]
# for k in range(numb):
# scal = getattr(gmod.scalpara, listname[l][k])
# if featpara == 'fact' and not (scal == 'self' or scal == 'logt'):
# continue
# if featpara == 'mean' and (scal != 'gaus' and scal != 'lnor'):
# continue
# if featpara == 'stdv' and (scal != 'gaus' and scal != 'lnor'):
# continue
#
# if strgtypepara == 'genr':
# strgextn = 'pop%d' % l
# else:
# strgextn = ''
# print('featpara')
# print(featpara)
# print('listname')
# print(listname)
# listfeat[l][k] = getattr(gmodfeat, listname[l][k] + strgextn)
# listfeatflat.append(listfeat[l][k])
# setattr(gmodfeat, strgtypepara + 'elem', listfeat)
# setattr(gmodfeat, strgtypepara + 'elemflat', listfeatflat)
### groups of parameters inside the parameter vector
### 'base': all fixed-dimensional generative parameters
### 'full': all generative parameters
for strggroppara in ['base', 'full']:
indx = getattr(gmod, 'indxparagenr' + strggroppara)
feat = [0. for k in indx]
for attr, valu in gmod.indxpara.__dict__.items():
if not np.isscalar(valu):
continue
scal = getattr(gmod.scalpara, attr)
if not (scal == 'self' or scal == 'logt') and featpara == 'fact':
continue
if scal != 'gaus' and (featpara == 'mean' or featpara == 'stdv'):
print('Mean or Std for non-Gaussian')
continue
if featpara == 'name':
feat[valu] = attr
else:
feat[valu] = getattr(gmodfeat, attr)
feat = np.array(feat)
setattr(gmodfeat, 'genr' + strggroppara, feat)
#print('gmod.minmpara')
#for attr, varb in gmod.minmpara.__dict__.items():
# print(attr, varb)
#print('gmod.maxmpara')
#for attr, varb in gmod.maxmpara.__dict__.items():
# print(attr, varb)
#print('gmod.scalpara')
#for attr, varb in gmod.scalpara.__dict__.items():
# print(attr, varb)
#raise Exception('')
## population groups
### number of elements
for strgvarb in ['numbelem', 'meanelem']:
listindxpara = []
if strgmodl == 'true':
listpara = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith(strgvarb + 'p'):
listindxpara.append(valu)
if strgmodl == 'true':
listpara.append(getattr(gmod.this, strg))
listindxpara = np.array(listindxpara)
setattr(gmod.indxpara, strgvarb, listindxpara)
if strgmodl == 'true':
listpara = np.array(listpara)
setattr(gmod, strgvarb, listpara)
### parameters of priors for element parameters
gmod.indxpara.prioelem = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg == 'dist' and np.isscalar(valu):
gmod.indxpara.prioelem.append(valu)
gmod.indxpara.prioelem = np.array(gmod.indxpara.prioelem)
### hyperparameters
if gmod.typemodltran == 'pois':
gmod.indxpara.hypr = np.array(list(gmod.indxpara.prioelem) + list(gmod.indxpara.meanelem))
else:
gmod.indxpara.hypr = gmod.indxpara.prioelem
## generative base parameter indices for each scaling
gmod.listindxparagenrbasescal = dict()
for scaltype in gdat.listscaltype:
gmod.listindxparagenrbasescal[scaltype] = np.where(np.array(gmod.scalpara.genrbase) == scaltype)[0]
if gdat.booldiagmode:
if np.where(gmod.scalpara.genrfull == 0)[0].size > 0:
raise Exception('')
def plot_lens(gdat):
# gmod was previously undefined in this function; assume the fitting model for these intrinsic plots
gmod = gdat.fitt
if gmod.boolelemdeflsubh:
xdat = gdat.binspara.angl[1:] * gdat.anglfact
lablxdat = gdat.labltotlpara.gang
listdeflscal = np.array([4e-2, 4e-2, 4e-2]) / gdat.anglfact
listanglscal = np.array([0.05, 0.1, 0.05]) / gdat.anglfact
listanglcutf = np.array([1., 1., 10.]) / gdat.anglfact
listasym = [False, False, False]
listydat = []
for deflscal, anglscal, anglcutf, asym in zip(listdeflscal, listanglscal, listanglcutf, listasym):
listydat.append(retr_deflcutf(gdat.binspara.angl[1:], deflscal, anglscal, anglcutf, asym=asym) * gdat.anglfact)
for scalxdat in ['self', 'logt']:
path = gdat.pathinitintr + 'deflcutf' + scalxdat + '.pdf'
tdpy.plot_gene(path, xdat, listydat, scalxdat=scalxdat, scalydat='logt', lablxdat=lablxdat, \
lablydat=r'$\alpha_n$ [$^{\prime\prime}$]', limtydat=[1e-3, 1.5e-2], limtxdat=[None, 2.])
# pixel-convolution of the Sersic profile
# temp -- y axis labels are wrong, should be per solid angle
xdat = gdat.binspara.lgalsers * gdat.anglfact
for n in range(gdat.numbindxsers + 1):
for k in range(gdat.numbhalfsers + 1):
if k != 5:
continue
path = gdat.pathinitintr + 'sersprofconv%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, gdat.sersprof[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
#path = gdat.pathinitintr + 'sersprofcntr%04d%04d.pdf' % (n, k)
#tdpy.plot_gene(path, xdat, gdat.sersprofcntr[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], \
scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], scalxdat='logt', \
scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
xdat = gdat.binspara.angl * gdat.anglfact
listspec = np.array([1e-19, 1e-18, 1e-18, 1e-18]) / gdat.anglfact
listsize = np.array([0.3, 1., 1., 1.]) / gdat.anglfact
listindx = np.array([4., 2., 4., 10.])
listydat = []
listlabl = []
for spec, size, indx in zip(listspec, listsize, listindx):
listydat.append(spec * retr_sbrtsersnorm(gdat.binspara.angl, size, indxsers=indx))
listlabl.append('$R_e = %.3g ^{\prime\prime}, n = %.2g$' % (size * gdat.anglfact, indx))
path = gdat.pathinitintr + 'sersprof.pdf'
tdpy.plot_gene(path, xdat, listydat, scalxdat='logt', scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, \
listlegd=listlabl, listhlin=1e-7, limtydat=[1e-8, 1e0])
minmredshost = 0.01
maxmredshost = 0.4
minmredssour = 0.01
maxmredssour = 2.
numbreds = 200
retr_axis(gdat, 'redshost')
retr_axis(gdat, 'redssour')
gdat.meanpara.adishost = np.empty(numbreds)
for k in range(numbreds):
gdat.meanpara.adishost[k] = gdat.adisobjt(gdat.meanpara.redshost[k])
asca = 0.1 / gdat.anglfact
acut = 1. / gdat.anglfact
minmmass = np.zeros((numbreds + 1, numbreds + 1))
maxmmass = np.zeros((numbreds + 1, numbreds + 1))
for k, redshost in enumerate(gdat.binspara.redshost):
for n, redssour in enumerate(gdat.binspara.redssour):
if redssour > redshost:
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut)
minmmass[n, k] = np.log10(factmcutfromdefs * gdat.minmdefs)
maxmmass[n, k] = np.log10(factmcutfromdefs * gdat.maxmdefs)
#valulevl = np.linspace(7.5, 9., 5)
valulevl = [7.0, 7.3, 7.7, 8., 8.6]
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, minmmass, 10, colors='g', levels=valulevl)
axis.clabel(cont, inline=1, fontsize=20, fmt='%.3g')
axis.set_xlabel(r'$z_{\rm{hst}}$')
axis.set_ylabel(r'$z_{\rm{src}}$')
axis.set_title(r'$M_{c,min}$ [$M_{\odot}$]')
path = gdat.pathinitintr + 'massredsminm.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
valulevl = np.linspace(9., 11., 20)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
imag = axis.imshow(maxmmass, extent=[minmredshost, maxmredshost, minmredssour, maxmredssour], aspect='auto', vmin=9., vmax=11.)
cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, maxmmass, 10, colors='g', levels=valulevl)
axis.clabel(cont, inline=1, fontsize=15, fmt='%.3g')
axis.set_xlabel('$z_{hst}$')
axis.set_ylabel('$z_{src}$')
axis.set_title(r'$M_{c,max}$ [$M_{\odot}$]')
path = gdat.pathinitintr + 'massredsmaxm.pdf'
plt.colorbar(imag)
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * gdat.sizepixl * 1e-3)
axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * 2. * gdat.maxmgangdata * 1e-3)
axis.set_xlabel('$z_h$')
axis.set_yscale('log')
axis.set_ylabel(r'$\lambda$ [kpc]')
path = gdat.pathinitintr + 'wlenreds.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
fracacutasca = np.logspace(-1., 2., 20)
mcut = retr_mcutfrommscl(fracacutasca)
axis.loglog(fracacutasca, mcut)
axis.set_xlabel(r'$\tau_n$')
axis.set_ylabel(r'$M_{c,n} / M_{0,n}$')
axis.axhline(1., ls='--')
path = gdat.pathinitintr + 'mcut.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
def retr_listrtagprev(strgcnfg, pathpcat):
# list of PCAT run plot outputs
pathimag = pathpcat + '/imag/'
listrtag = fnmatch.filter(os.listdir(pathimag), '2*')
listrtagprev = []
for rtag in listrtag:
strgstat = pathpcat + '/data/outp/' + rtag
if chec_statfile(pathpcat, rtag, 'gdatmodipost', typeverb=0) and strgcnfg + '_' + rtag[16:].split('_')[-1] == rtag[16:]:
listrtagprev.append(rtag)
listrtagprev.sort()
return listrtagprev
def make_legd(axis, offs=None, loca=1, numbcols=1, ptch=None, line=None):
hand, labl = axis.get_legend_handles_labels()
legd = axis.legend(hand, labl, fancybox=True, frameon=True, bbox_to_anchor=offs, bbox_transform=axis.transAxes, ncol=numbcols, loc=loca, labelspacing=1, handlelength=2)
legd.get_frame().set_fill(True)
legd.get_frame().set_facecolor('white')
def setp_namevarbsing(gdat, gmod, strgmodl, strgvarb, popl, ener, evtt, back, isfr, iele):
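'''
Construct the list of variable names for a given base name by appending population, energy,
event-type, background, Sersic-component, and element suffixes, e.g. 'bacpback0000en00' or
'sigcen00evt0' (illustrative examples based on the branches below)
'''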
if popl == 'full':
indxpopltemp = gmod.indxpopl
elif popl != 'none':
indxpopltemp = [popl]
if ener == 'full':
indxenertemp = gdat.indxener
elif ener != 'none':
indxenertemp = [ener]
if evtt == 'full':
indxevtttemp = gdat.indxevtt
elif evtt != 'none':
indxevtttemp = [evtt]
if back == 'full':
gmod.indxbacktemp = gmod.indxback
elif isinstance(back, int):
gmod.indxbacktemp = np.array([back])
# mirror the branches above for the Sersic component index (isfr), which was previously undefined when used below
if isfr == 'full':
indxisfrtemp = gmod.indxsersfgrd
elif isfr != 'none':
indxisfrtemp = [isfr]
liststrgvarb = []
if iele != 'none':
for l in gmod.indxpopl:
if iele == 'full':
listiele = np.arange(gmod.maxmpara.numbelem[l])
else:
listiele = [iele]
for k in listiele:
liststrgvarb.append(strgvarb + 'pop%d%04d' % (l, k))
if popl != 'none' and ener == 'none' and evtt == 'none' and back == 'none' and iele == 'none':
for l in indxpopltemp:
liststrgvarb.append(strgvarb + 'pop%d' % l)
if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr != 'none':
for e in indxisfrtemp:
liststrgvarb.append(strgvarb + 'isf%d' % e)
if popl == 'none' and ener != 'none' and evtt != 'none' and back == 'none':
for i in indxenertemp:
for m in indxevtttemp:
liststrgvarb.append(strgvarb + 'en%02devt%d' % (i, m))
if popl == 'none' and ener != 'none' and evtt == 'none' and back != 'none':
for c in gmod.indxbacktemp:
for i in indxenertemp:
liststrgvarb.append(strgvarb + 'back%04den%02d' % (c, i))
if popl == 'none' and ener == 'none' and evtt == 'none' and back != 'none':
for c in gmod.indxbacktemp:
liststrgvarb.append(strgvarb + 'back%04d' % c)
if popl == 'none' and ener != 'none' and evtt == 'none' and back == 'none':
for i in indxenertemp:
liststrgvarb.append(strgvarb + 'en%02d' % i)
if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr == 'none':
liststrgvarb.append(strgvarb)
if gdat.booldiagmode:
for strgvarb in liststrgvarb:
if liststrgvarb.count(strgvarb) != 1:
print('liststrgvarb')
print(liststrgvarb)
print('popl')
print(popl)
print('ener')
print(ener)
print('evtt')
print(evtt)
print('back')
print(back)
print('isfr')
print(isfr)
print('iele')
print(iele)
raise Exception('')
return liststrgvarb
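# Example (a sketch; assumes gdat.indxener holds three energy bins): per-energy
# names come from the 'en%02d' branch above, e.g.
#
#   setp_namevarbsing(gdat, gmod, 'fitt', 'cntpdata', 'none', 'full', 'none', 'none', 'none', 'none')
#   # -> ['cntpdataen00', 'cntpdataen01', 'cntpdataen02']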
def setp_varb(gdat, strgvarbbase, valu=None, minm=None, maxm=None, scal='self', lablroot=None, lablunit='', mean=None, stdv=None, cmap=None, numbbins=10, \
popl='none', ener='none', evtt='none', back='none', isfr='none', iele='none', \
boolinvr=False, \
strgmodl=None, strgstat=None, \
):
'''
Set up variable values across all models (true and fitting) as well as all populations, energy bins,
event bins, background components, and Sersic components
'''
# determine the list of models
if strgmodl is None:
if gdat.typedata == 'mock':
liststrgmodl = ['true', 'fitt', 'plot']
else:
liststrgmodl = ['fitt', 'plot']
else:
if strgmodl == 'true' or strgmodl == 'plot' or strgmodl == 'refr':
liststrgmodl = [strgmodl]
else:
liststrgmodl = ['fitt', 'plot']
print('liststrgmodl')
print(liststrgmodl)
for strgmodl in liststrgmodl:
if strgmodl == 'plot':
gmod = gdat.fitt
gmodoutp = gdat
else:
gmod = getattr(gdat, strgmodl)
gmodoutp = gmod
# get the list of names of the variable
liststrgvarbnone = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, 'none')
if iele != 'none':
liststrgvarb = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, iele)
else:
liststrgvarb = liststrgvarbnone
# set the values of each variable in the list
for strgvarb in liststrgvarb:
if minm is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.minmpara, strgvarb, minm)
if maxm is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.maxmpara, strgvarb, maxm)
if mean is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.meanpara, strgvarb, mean)
if stdv is not None:
# assumption: standard deviations are stored on stdvpara rather than overwriting meanpara
setp_varbcore(gdat, strgmodl, gmodoutp.stdvpara, strgvarb, stdv)
if valu is not None:
if strgstat is None:
print('strgvarb')
print(strgvarb)
print('strgmodl')
print(strgmodl)
print('valu')
print(valu)
print('')
setp_varbcore(gdat, strgmodl, gmodoutp, strgvarb, valu)
elif strgstat == 'this':
setp_varbcore(gdat, strgmodl, gmodoutp.this, strgvarb, valu)
if scal is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.scalpara, strgvarb, scal)
if lablroot is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.lablrootpara, strgvarb, lablroot)
if lablunit is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.lablunitpara, strgvarb, lablunit)
if cmap is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.cmappara, strgvarb, cmap)
setp_varbcore(gdat, strgmodl, gmodoutp.numbbinspara, strgvarb, numbbins)
# create limt, bins, mean, and delt
if (minm is not None and maxm is not None) or (mean is not None and stdv is not None):
# determine minima and maxima for Gaussian or log-Gaussian distributed parameters
if mean is not None:
minm = mean - gdat.numbstdvgaus * stdv
maxm = mean + gdat.numbstdvgaus * stdv
# uniformly-distributed
if scal == 'self' or scal == 'pois' or scal == 'gaus':
binsunif = np.linspace(minm, maxm, numbbins + 1)
if scal == 'logt' or scal == 'powr':
binsunif = np.linspace(np.log10(minm), np.log10(maxm), numbbins + 1)
if gdat.booldiagmode:
if minm <= 0.:
raise Exception('')
if scal == 'asnh':
binsunif = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numbbins + 1)
if boolinvr:
binsunif = binsunif[::-1]
meanparaunif = (binsunif[1:] + binsunif[:-1]) / 2.
if scal == 'self' or scal == 'pois' or scal == 'gaus':
meanpara = meanparaunif
bins = binsunif
minmunif = minm
maxmunif = maxm
if scal == 'logt' or scal == 'powr':
meanpara = 10**meanparaunif
bins = 10**binsunif
minmunif = np.log10(minm)
maxmunif = np.log10(maxm)
if scal == 'asnh':
meanpara = np.sinh(meanparaunif)
bins = np.sinh(binsunif)
minmunif = np.arcsinh(minm)
maxmunif = np.arcsinh(maxm)
delt = np.diff(bins)
limt = np.array([minm, maxm])
# 'self' is not yet defined
if scal == 'asnh' or scal == 'logt' or scal == 'powr':
listvalutickmajr, listlabltickmajr, listvalutickminr, listlabltickminr = tdpy.retr_valulabltick(minm, maxm, scal)
setattr(gmodoutp.labltickmajrpara, strgvarb, listlabltickmajr)
setattr(gmodoutp.valutickmajrpara, strgvarb, listvalutickmajr)
setattr(gmodoutp.labltickminrpara, strgvarb, listlabltickminr)
setattr(gmodoutp.valutickminrpara, strgvarb, listvalutickminr)
#labltick = np.empty(gdat.numbtickcbar, dtype=object)
#for k in range(gdat.numbtickcbar):
# if scal == 'asnh':
# valutick[k] = np.sinh(tickunif[k])
# if scal == 'logt' or scal == 'powr':
# valutick[k] = 10**(tickunif[k])
# # avoid very small, but nonzero central values in the residual count color maps
# if strgcbar == 'cntpresi' and np.fabs(valutick[k]) < 1e-5:
# valutick[k] = 0.
# if strgcbar == 'cntpdata' and np.amax(valutick) > 1e3:
# labltick[k] = '%d' % valutick[k]
# else:
# labltick[k] = '%.3g' % valutick[k]
setattr(gmodoutp.limtpara, strgvarb, limt)
setattr(gmodoutp.binspara, strgvarb, bins)
setattr(gmodoutp.meanpara, strgvarb, meanpara)
setattr(gmodoutp.deltpara, strgvarb, delt)
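# Usage sketch (the parameter name and values below are hypothetical): defining
# a log-scaled variable with explicit bounds populates minmpara, maxmpara,
# binspara, meanpara, deltpara and limtpara on the relevant model objects.
#
#   setp_varb(gdat, 'fluxsour', minm=1e-9, maxm=1e-6, scal='logt',
#             lablroot='$f$', lablunit='1/cm$^2$/s/GeV')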
def retr_ticklabltemp(gdat, strgcbar):
minm = getattr(gdat.minmpara, strgcbar)
maxm = getattr(gdat.maxmpara, strgcbar)
scal = getattr(gdat.scalpara, strgcbar)
numb = gdat.numbtickcbar - 1
retr_axis(gdat, strgcbar, numb=numb)
minmscal = minm
if scal == 'asnh':
minmscal = np.arcsinh(minmscal)
if scal == 'logt':
minmscal = np.log10(minmscal)
maxmscal = maxm
if scal == 'asnh':
maxmscal = np.arcsinh(maxmscal)
if scal == 'logt':
maxmscal = np.log10(maxmscal)
tickscal = np.linspace(minmscal, maxmscal, gdat.numbtickcbar)
labl = np.empty(gdat.numbtickcbar, dtype=object)
tick = np.copy(tickscal)
for k in range(gdat.numbtickcbar):
if scal == 'asnh':
tick[k] = np.sinh(tickscal[k])
elif scal == 'logt':
tick[k] = 10**(tickscal[k])
# avoid very small, but nonzero central values in the residual count color maps
if strgcbar == 'cntpresi' and np.fabs(tick[k]) < 1e-5:
tick[k] = 0.
if strgcbar == 'cntpdata' and np.amax(tick) > 1e3:
labl[k] = '%d' % tick[k]
else:
labl[k] = '%.3g' % tick[k]
setattr(gdat.tickpara, strgcbar, tick)
def retr_axistemp(gdat, strgvarb, strgmodl=None, boolinvr=False):
if strgmodl is None:
listgdattemp = [gdat]
for strgmodl in gdat.liststrgmodl:
listgdattemp.append(getattr(gdat, strgmodl))
elif strgmodl == 'fitt' or strgmodl == 'true':
listgdattemp = [getattr(gdat, strgmodl)]
elif strgmodl == 'allm':
listgdattemp = []
for strgmodl in gdat.liststrgmodl:
listgdattemp.append(getattr(gdat, strgmodl))
for gdattemp in listgdattemp:
minm = getattr(gdattemp.minmpara, strgvarb)
maxm = getattr(gdattemp.maxmpara, strgvarb)
numb = getattr(gdattemp.numbbinspara, strgvarb)
scal = getattr(gdattemp.scalpara, strgvarb)
if scal == 'self' or scal == 'pois' or scal == 'gaus':
binsscal = np.linspace(minm, maxm, numb + 1)
if scal == 'logt':
print('minm')
print(minm)
print('maxm')
print(maxm)
print('strgvarb')
print(strgvarb)
binsscal = np.linspace(np.log10(minm), np.log10(maxm), numb + 1)
print('')
if gdat.booldiagmode:
if minm <= 0.:
raise Exception('')
if scal == 'asnh':
binsscal = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numb + 1)
if boolinvr:
binsscal = binsscal[::-1]
meanvarbscal = (binsscal[1:] + binsscal[:-1]) / 2.
if scal == 'self' or scal == 'pois' or scal == 'gaus':
meanvarb = meanvarbscal
bins = binsscal
if scal == 'logt':
meanvarb = 10**meanvarbscal
bins = 10**binsscal
if scal == 'asnh':
meanvarb = np.sinh(meanvarbscal)
bins = np.sinh(binsscal)
delt = np.diff(bins)
limt = np.array([np.amin(bins), np.amax(bins)])
setattr(gdattemp.limtpara, strgvarb, limt)
setattr(gdattemp.binspara, strgvarb, bins)
setattr(gdattemp.meanpara, strgvarb, meanvarb)
setattr(gdattemp.deltpara, strgvarb, delt)
def setp_varbcore(gdat, strgmodl, gdattemp, strgvarbtemp, valu):
# check if the variable is defined by the user
try:
valutemp = getattr(gdattemp, strgvarbtemp)
if valutemp is None:
raise
if gdat.typeverb > 0:
print('Received custom value for %s, %s: %s' % (strgvarbtemp, strgmodl, valutemp))
# if not defined or defined as None, define it
except:
setattr(gdattemp, strgvarbtemp, valu)
def intp_sinc(gdat, lgal, bgal):
intpsinc = 4. * gdat.numbsidepsfn**2 * np.sum(gdat.temppsfn * sinc(gdat.numbsidepsfn * (gdat.gridpsfnlgal + lgal) - gdat.gridpsfnlgal) * \
sinc(gdat.numbsidepsfn * (gdat.gridpsfnbgal + bgal) - gdat.gridpsfnbgal))
return intpsinc
def retr_fluxbrgt(gdat, lgal, bgal, flux):
if lgal.size == 0:
fluxbrgt = np.array([0.])
fluxbrgtassc = np.array([0.])
else:
indxbrgt = np.argmax(flux)
fluxbrgt = flux[indxbrgt]
# assumption: the associated flux defaults to the brightest flux itself
fluxbrgtassc = flux[indxbrgt]
return fluxbrgt, fluxbrgtassc
def init_figr(gdat, gdatmodi, strgpdfn, strgplot, strgstat, strgmodl, indxenerplot, indxevttplot, indxpoplplot):
figrsize = (gdat.sizeimag, gdat.sizeimag)
figr, axis = plt.subplots(figsize=figrsize)
nameplot = strgplot
if gdat.numbener > 1:
nameplot += 'en%02d' % gdat.indxenerincl[indxenerplot]
if gdat.numbener > 1:
if indxevttplot == -1:
nameplot += 'evtA'
else:
nameplot += 'evt%d' % gdat.indxevttincl[indxevttplot]
if gdat.fitt.numbpopl > 1:
if indxpoplplot == -1:
nameplot += 'popA'
else:
nameplot += 'pop%d' % indxpoplplot
path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, nameplot)
print('gdat.fitt.labltotlpara.lgalpop0')
print(gdat.fitt.labltotlpara.lgalpop0)
print('gdat.fitt.labltotlpara.bgalpop0')
print(gdat.fitt.labltotlpara.bgalpop0)
axis.set_xlabel(gdat.fitt.labltotlpara.lgalpop0)
axis.set_ylabel(gdat.fitt.labltotlpara.bgalpop0)
titl = ''
if indxenerplot is not None and gdat.numbener > 1 and strgplot.endswith('cnts'):
titl = gdat.strgener[indxenerplot]
if indxevttplot is not None and gdat.numbevtt > 1 and strgplot.endswith('cnts'):
titl += ' ' + gdat.strgevtt[indxevttplot]
axis.set_title(titl)
return figr, axis, path
def draw_frambndr(gdat, axis):
outr = max(gdat.frambndrmodl, gdat.frambndrdata)
axis.set_xlim([-outr, outr])
axis.set_ylim([-outr, outr])
innr = min(gdat.frambndrmodl, gdat.frambndrdata)
axis.axvline(innr, ls='--', alpha=gdat.alphbndr, color='black')
axis.axvline(-innr, ls='--', alpha=gdat.alphbndr, color='black')
axis.axhline(innr, ls='--', alpha=gdat.alphbndr, color='black')
axis.axhline(-innr, ls='--', alpha=gdat.alphbndr, color='black')
def retr_imag(gdat, axis, maps, strgstat, strgmodl, strgcbar, indxenerplot=None, indxevttplot=-1, booltdim=False, imag=None):
draw_frambndr(gdat, axis)
# take the relevant energy and PSF bins
if indxenerplot is not None:
if indxevttplot == -1:
maps = np.sum(maps[indxenerplot, ...], axis=1)
else:
maps = maps[indxenerplot, :, indxevttplot]
# project the map to 2D
if gdat.typepixl == 'heal':
maps = tdpy.retr_cart(maps, indxpixlrofi=gdat.indxpixlrofi, numbsideinpt=gdat.numbsideheal, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata)
if gdat.typepixl == 'cart':
shap = [gdat.numbsidecart] + list(maps.shape)
shap[1] = gdat.numbsidecart
shapflat = list(maps.shape)
shapflat[0] = gdat.numbpixlfull
mapstemp = np.zeros(shapflat)
if maps.size == gdat.indxpixlrofi.size:
mapstemp[gdat.indxpixlrofi, ...] = maps
else:
mapstemp[:, ...] = maps
maps = mapstemp.reshape(shap).swapaxes(0, 1)
# temp -- this is needed to bring the Fermi-LAT map to the right direction
#maps = fliplr(maps)
# rescale the map
if strgmodl is not None:
gmod = getattr(gdat, strgmodl)
else:
gmod = gdat
scal = getattr(gdat.scalpara, strgcbar)
cmap = getattr(gdat.cmappara, strgcbar)
vmin = getattr(gdat.minmpara, strgcbar)
vmax = getattr(gdat.maxmpara, strgcbar)
if scal == 'asnh':
maps = np.arcsinh(maps)
if scal == 'logt':
maps = np.log10(maps)
if imag is None:
imag = axis.imshow(maps, cmap=cmap, origin='lower', extent=gdat.exttrofi, interpolation='nearest', vmin=vmin, vmax=vmax, alpha=gdat.alphmaps)
return imag
else:
imag.set_data(maps)
def make_cbar(gdat, axis, imag, strgvarb):
# make a color bar
valutickmajr = getattr(gdat.valutickmajrpara, strgvarb)
labltickmajr = getattr(gdat.labltickmajrpara, strgvarb)
print('valutickmajr')
print(valutickmajr)
print('labltickmajr')
print(labltickmajr)
cbar = plt.colorbar(imag, ax=axis, fraction=0.05, aspect=15)
cbar.set_ticks(valutickmajr)
cbar.set_ticklabels(labltickmajr)
return cbar
def make_legdmaps(gdat, strgstat, strgmodl, axis, mosa=False, assc=False):
gmod = getattr(gdat, strgmodl)
# transdimensional elements
if strgmodl == 'fitt' and (strgstat == 'pdfn' and gdat.boolcondcatl or strgstat == 'this') and gmod.numbparaelem > 0:
for l in gmod.indxpopl:
colr = retr_colr(gdat, strgstat, strgmodl, l)
if strgstat == 'pdfn':
labl = 'Condensed %s %s' % (gmod.legd, gmod.legdpopl[l])
else:
labl = 'Sample %s %s' % (gmod.legd, gmod.legdpopl[l])
if not gmod.maxmpara.numbelem[l] == 0:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, \
label=labl, marker=gmod.listelemmrkr[l], lw=gdat.mrkrlinewdth, color=colr)
for q in gdat.indxrefr:
if not np.amax(gdat.refr.numbelem[q]) == 0:
if assc:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, \
label=gdat.refr.lablhits[q], marker=gdat.refr.listmrkrhits[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label=gdat.refr.lablmiss[q], marker=gdat.refr.listmrkrmiss[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
else:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label=gdat.refr.lablelem[q], marker=gdat.refr.listmrkrmiss[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
# fixed-dimensional objects
if strgmodl == 'fitt':
if gmod.boollens:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Source' % gmod.lablmodl, marker='<', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.typeemishost != 'none':
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Host' % gmod.lablmodl, marker='s', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gdat.typedata == 'mock':
if gmod.boollens:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Source' % gdat.refr.labl, marker='>', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
if gmod.typeemishost != 'none':
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Host' % gdat.refr.labl, marker='D', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
temphand, temp = axis.get_legend_handles_labels()
numblabl = len(temp)
if numblabl == 4:
numbcols = 2
else:
numbcols = 3
if mosa:
axis.legend(bbox_to_anchor=[1., 1.15], loc='center', ncol=numbcols)
else:
axis.legend(bbox_to_anchor=[0.5, 1.15], loc='center', ncol=numbcols)
def supr_fram(gdat, gdatmodi, strgstat, strgmodl, axis, indxpoplplot=-1, assc=False):
gmod = getattr(gdat, strgmodl)
gmodstat = getattr(gmod, strgstat)
# associations with the reference elements
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] > 0:
if indxpoplplot == -1:
listindxpoplplot = gmod.indxpopl
else:
listindxpoplplot = [indxpoplplot]
for l in listindxpoplplot:
reframpl = gdat.refr.dictelem[q][gdat.refr.nameparagenrelemampl[q]][0, :]
mrkrsize = retr_mrkrsize(gdat, strgmodl, reframpl, gdat.refr.nameparagenrelemampl[q])
lgal = np.copy(gdat.refr.dictelem[q]['lgal'][0, :])
bgal = np.copy(gdat.refr.dictelem[q]['bgal'][0, :])
numbelem = int(gdat.refr.numbelem[q])
if gdatmodi is not None and gmod.numbparaelem > 0 and assc:
### hit
indx = gdatmodi.this.indxelemrefrasschits[q][l]
if indx.size > 0:
axis.scatter(gdat.anglfact * lgal[indx], gdat.anglfact * bgal[indx], s=mrkrsize[indx], alpha=gdat.alphelem, label=gdat.refr.lablhits[q], \
marker=gdat.refr.listmrkrhits[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
### missed
indx = gdatmodi.this.indxelemrefrasscmiss[q][l]
else:
indx = np.arange(lgal.size)
if indx.size > 0:
axis.scatter(gdat.anglfact * lgal[indx], gdat.anglfact * bgal[indx], s=mrkrsize[indx], alpha=gdat.alphelem, facecolor='none', \
label=gdat.refr.lablmiss[q], marker=gdat.refr.listmrkrmiss[q], \
lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
sizexoff = gdat.maxmgangdata * 0.05 * gdat.anglfact
sizeyoff = gdat.maxmgangdata * 0.05 * gdat.anglfact
if 'etag' in gdat.refr.namepara.elem[q]:
for k in range(indx.size):
axis.text(gdat.anglfact * lgal[indx[k]] + sizexoff, gdat.anglfact * bgal[indx[k]] + sizeyoff, gdat.refretag[q][indx[k]], \
verticalalignment='center', horizontalalignment='center', \
color='red', fontsize=1)
# temp -- generalize this to input refrlgalhost vs.
if gdat.typedata == 'mock':
## host galaxy position
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
lgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
bgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
axis.scatter(gdat.anglfact * lgalhost, gdat.anglfact * bgalhost, facecolor='none', alpha=0.7, \
label='%s Host %d' % (gdat.refr.labl, e), s=300, marker='D', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
if gmod.boollens:
## host galaxy Einstein radius
for e in gmod.indxsersfgrd:
truelgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
truebgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
truebeinhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % (e))]
axis.add_patch(plt.Circle((gdat.anglfact * truelgalhost, \
gdat.anglfact * truebgalhost), \
gdat.anglfact * truebeinhost, \
edgecolor=gdat.refr.colr, facecolor='none', lw=gdat.mrkrlinewdth))
if gmod.boollens:
## source galaxy position
axis.scatter(gdat.anglfact * gmodstat.paragenrscalfull[gmod.indxpara.lgalsour], \
gdat.anglfact * gmodstat.paragenrscalfull[gmod.indxpara.bgalsour], \
facecolor='none', \
alpha=0.7, \
#alpha=gdat.alphelem, \
label='%s Source' % gdat.refr.labl, s=300, marker='>', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
# model catalog
if indxpoplplot == -1:
listindxpoplplot = gmod.indxpopl
else:
listindxpoplplot = [indxpoplplot]
for l in listindxpoplplot:
if gdatmodi is not None:
if gmod.numbparaelem > 0:
colr = retr_colr(gdat, strgstat, strgmodl, l)
mrkrsize = retr_mrkrsize(gdat, strgmodl, gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[gmod.nameparagenrelemampl[l]][l]], gmod.nameparagenrelemampl[l])
if 'lgal' in gdatmodi.this.indxparagenrfullelem:
lgal = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['lgal']]
bgal = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['bgal']]
else:
gang = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['gang']]
aang = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['aang']]
lgal, bgal = retr_lgalbgal(gang, aang)
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, alpha=gdat.alphelem, label='Sample', marker=gmod.listelemmrkr[l], \
lw=gdat.mrkrlinewdth, color=colr)
## source
if gmod.boollens:
lgalsour = gdatmodi.this.paragenrscalfull[gmod.indxpara.lgalsour]
bgalsour = gdatmodi.this.paragenrscalfull[gmod.indxpara.bgalsour]
axis.scatter(gdat.anglfact * lgalsour, gdat.anglfact * bgalsour, facecolor='none', \
alpha=gdat.alphelem, \
label='%s Source' % gmod.lablpara, s=300, marker='<', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.typeemishost != 'none':
## host
lgalhost = [[] for e in gmod.indxsersfgrd]
bgalhost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
lgalhost[e] = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
bgalhost[e] = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
axis.scatter(gdat.anglfact * lgalhost[e], gdat.anglfact * bgalhost[e], facecolor='none', \
alpha=gdat.alphelem, \
label='%s Host' % gmod.lablpara, s=300, marker='s', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.boollens:
beinhost = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % (e))]
axis.add_patch(plt.Circle((gdat.anglfact * lgalhost[e], gdat.anglfact * bgalhost[e]), \
gdat.anglfact * beinhost, edgecolor=gmod.colr, facecolor='none', \
lw=gdat.mrkrlinewdth, ls='--'))
# temp
if strgstat == 'pdfn' and gdat.boolcondcatl and gmod.numbparaelem > 0:
lgal = np.zeros(gdat.numbprvlhigh)
bgal = np.zeros(gdat.numbprvlhigh)
ampl = np.zeros(gdat.numbprvlhigh)
cntr = 0
for r in gdat.indxstkscond:
if r in gdat.indxprvlhigh:
lgal[cntr] = gdat.dictglob['poststkscond'][r]['lgal'][0]
bgal[cntr] = gdat.dictglob['poststkscond'][r]['bgal'][0]
# temp -- this does not allow sources with different spectra to be assigned to the same stacked sample
ampl[cntr] = gdat.dictglob['poststkscond'][r][gmod.nameparagenrelemampl[l]][0]
cntr += 1
mrkrsize = retr_mrkrsize(gdat, strgmodl, ampl, gmod.nameparagenrelemampl[l])
colr = retr_colr(gdat, strgstat, strgmodl, l)
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, \
label='Condensed', marker=gmod.listelemmrkr[l], color='black', lw=gdat.mrkrlinewdth)
for r in gdat.indxstkscond:
lgal = np.array([gdat.dictglob['liststkscond'][r]['lgal']])
bgal = np.array([gdat.dictglob['liststkscond'][r]['bgal']])
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, \
marker=gmod.listelemmrkr[l], color='black', alpha=0.1, lw=gdat.mrkrlinewdth)
def retr_colr(gdat, strgstat, strgmodl, indxpopl=None):
if strgmodl == 'true':
if indxpopl is None:
colr = gdat.refr.colr
else:
colr = gdat.refr.colrelem[indxpopl]
if strgmodl == 'fitt':
gmod = gdat.fitt
if strgstat == 'this' or strgstat == 'pdfn':
if indxpopl is None:
colr = gmod.colr
else:
colr = gmod.colrelem[indxpopl]
if strgstat == 'mlik':
colr = 'r'
return colr
def retr_levipost(listllik):
minmlistllik = np.amin(listllik)
levipost = np.log(np.mean(1. / np.exp(listllik - minmlistllik))) + minmlistllik
return levipost
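# A sketch of how the estimator above is used: listllik is a 1D array of
# log-likelihood samples from the posterior, and the subtraction of its minimum
# keeps the exponentials in a representable range before the harmonic-mean
# (log-evidence) estimate is formed.
#
#   listllik = np.array([-1205.3, -1203.8, -1204.1])
#   levi = retr_levipost(listllik)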
def retr_infofromlevi(pmeallik, levi):
info = pmeallik - levi
return info
def retr_jcbn():
fluxpare, lgalpare, bgalpare, fluxauxi, lgalauxi, bgalauxi = sympy.symbols('fluxpare lgalpare bgalpare fluxauxi lgalauxi bgalauxi')
matr = sympy.Matrix([[ fluxpare, fluxauxi, 0, 0, 0, 0], \
[-fluxpare, 1 - fluxauxi, 0, 0, 0, 0], \
[-lgalauxi, 0, 1, 1 - fluxauxi, 0, 0], \
[-lgalauxi, 0, 1, -fluxauxi, 0, 0], \
[-bgalauxi, 0, 0, 0, 1, 1 - fluxauxi], \
[-bgalauxi, 0, 0, 0, 1, -fluxauxi]])
jcbn = matr.det()
return jcbn
# f1 = uf f0
# f2 = (1 - uf) f0
# x1 = x0 + (1 - uf) ux
# x2 = x0 - uf ux
# y1 = y0 + (1 - uf) uy
# y2 = y0 - uf uy
# f1/uf f1/f0 f1/x0 f1/ux f1/y0 f1/uy
# f2/uf f2/f0 f2/x0 f2/ux f2/y0 f2/uy
# x1/uf x1/f0 x1/x0 x1/ux x1/y0 x1/uy
# x2/uf x2/f0 x2/x0 x2/ux x2/y0 x2/uy
# y1/uf y1/f0 y1/x0 y1/ux y1/y0 y1/uy
# y2/uf y2/f0 y2/x0 y2/ux y2/y0 y2/uy
# f0 uf 0 0 0 0
# -f0 1 - uf 0 0 0 0
# -ux 0 1 1 - uf 0 0
# -ux 0 1 -uf 0 0
# -uy 0 0 0 1 1 - uf
# -uy 0 0 0 1 -uf
# f0
#retr_jcbn()
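# Sanity check of the derivation above (a sketch; sympy is imported for
# retr_jcbn): the matrix is block lower-triangular, so its determinant is
# det([[f0, uf], [-f0, 1 - uf]]) * (-1) * (-1) = f0, i.e. the Jacobian of the
# split move equals the parent flux.
#
#   jcbn = retr_jcbn()
#   assert sympy.simplify(jcbn - sympy.symbols('fluxpare')) == 0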
def retr_angldist(gdat, lgalfrst, bgalfrst, lgalseco, bgalseco):
# temp -- heal does not work when the dimension of lgalfrst is 1
if gdat.typepixl == 'heal':
dir1 = np.array([lgalfrst, bgalfrst])
dir2 = np.array([lgalseco, bgalseco])
angldist = hp.rotator.angdist(dir1, dir2)
else:
angldist = np.sqrt((lgalfrst - lgalseco)**2 + (bgalfrst - bgalseco)**2)
return angldist
def retr_deflextr(gdat, indxpixlelem, sher, sang):
factcosi = sher * np.cos(2. * sang)
factsine = sher * np.sin(2. * sang)
defllgal = factcosi * gdat.lgalgrid[indxpixlelem] + factsine * gdat.bgalgrid[indxpixlelem]
deflbgal = factsine * gdat.lgalgrid[indxpixlelem] - factcosi * gdat.bgalgrid[indxpixlelem]
return np.vstack((defllgal, deflbgal)).T
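# Quick check (a sketch): for sher = 0.1 and sang = 0. the factors reduce to
# factcosi = 0.1 and factsine = 0., so
#
#   defl = retr_deflextr(gdat, indxpixlelem, 0.1, 0.)
#   # defl[:, 0] == 0.1 * gdat.lgalgrid[indxpixlelem]
#   # defl[:, 1] == -0.1 * gdat.bgalgrid[indxpixlelem]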
def readfile(path):
print('Reading %s...' % path)
filepick = open(path + '.p', 'rb')
filearry = h5py.File(path + '.h5', 'r')
gdattemptemp = pickle.load(filepick)
for attr in filearry:
setattr(gdattemptemp, attr, filearry[attr][()])
filepick.close()
filearry.close()
if 'gdatfinl' in path or 'gdatinit' in path:
if hasattr(gdattemptemp, 'edis') and gdattemptemp.edis is not None and hasattr(gdattemptemp, 'binsener'):
gdattemptemp.edisintp = sp.interpolate.interp1d(gdattemptemp.binsener, gdattemptemp.edis, fill_value='extrapolate')
gdattemptemp.adisobjt = sp.interpolate.interp1d(gdattemptemp.redsintp, gdattemptemp.adisintp, fill_value='extrapolate')
gdattemptemp.redsfromdlosobjt = sp.interpolate.interp1d(gdattemptemp.adisintp * gdattemptemp.redsintp, \
gdattemptemp.redsintp, fill_value='extrapolate')
return gdattemptemp
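# Usage sketch (hypothetical path): the function expects the file pair
# '<path>.p' (pickled gdat skeleton) and '<path>.h5' (array-valued attributes)
# written by the corresponding writer, e.g.
#
#   gdatfinl = readfile('/path/to/outp/rtag/gdatfinl')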
def init_stat(gdat):
# construct the initial state
if gdat.typeverb > 0:
print('Initializing the sampler state...')
print('inittype')
print(gdat.inittype)
gmod = gdat.fitt
## initialization
### initialize the unit sample vector randomly
gmod.this.paragenrunitfull = np.random.rand(gmod.numbparagenrfull)
gmod.this.paragenrscalfull = np.empty(gmod.numbparagenrfull)
## impose user-specified initial state
### number of elements
## create dummy indxparagenrfullelem
gmod.this.indxparagenrfullelem = None
if gmod.numbparaelem > 0:
if gdat.inittype == 'refr':
for l in gmod.indxpopl:
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = gmod.paragenrunitfull[gmod.indxpara.numbelem[l]]
else:
for l in gmod.indxpopl:
if gmod.typemodltran == 'pois':
meanelemtemp = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, \
gmod.this.indxparagenrfullelem)[gmod.indxpara.meanelem[l]]
print('temp -- user input is not working for numbelem')
#namevarb = 'numbelempop%d' % l
#initvalu = getattr(gmod.init, namevarb)
#if initvalu > gmod.maxmpara.numbelem[l] or initvalu < gmod.minmpara.numbelem[l]:
# raise Exception('Bad initial number of elements...')
#gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = initvalu
if gmod.typemodltran == 'pois':
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = np.random.poisson(meanelemtemp)
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = round(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]])
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = \
min(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]], gmod.maxmpara.numbelem[l])
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = \
max(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]], gmod.minmpara.numbelem[l])
gmod.this.paragenrscalfull[gmod.indxpara.numbelem[l]] = gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]]
if gdat.booldiagmode:
if gdat.typedata == 'mock' and gdat.inittype == 'refr':
for l in gmod.indxpopl:
if gmod.paragenrunitfull[gmod.indxpara.numbelem[l]] > gmod.maxmpara.numbelem[l]:
raise Exception('')
if gmod.numbparaelem > 0:
gmod.this.indxelemfull = []
for l in gmod.indxpopl:
gmod.this.indxelemfull.append(list(range(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]].astype(int))))
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
if gdat.inittype == 'reco':
if gdat.namerecostat is not None:
strgcnfg = gdat.namerecostat
else:
strgcnfg = gdat.strgcnfg
path = gdat.pathoutp + 'stat_' + strgcnfg + '.h5'
if os.path.exists(path):
boolinitreco = True
thisfile = h5py.File(path, 'r')
if gdat.typeverb > 0:
print('Initializing from the state %s...' % path)
print('Likelihood:')
print(thisfile['lliktotl'][...])
# find the number of populations provided
maxmindxpopl = 0
for attr in thisfile:
if attr.startswith('lgalpop'):
indxpopltemp = int(attr[7])
if indxpopltemp > maxmindxpopl:
maxmindxpopl = indxpopltemp
numbpoplinpt = maxmindxpopl + 1
if numbpoplinpt != gmod.numbpopl:
print('State file and fitting metamodel have different number of populations.')
# find the number of elements provided
cntr = np.zeros(numbpoplinpt, dtype=int)
for attr in thisfile:
if attr.startswith('lgalpop'):
indxpopltemp = int(attr[7])
cntr[indxpopltemp] += 1
if gdat.typeverb > 0:
print('Number of elements found:')
print(cntr)
for attr in thisfile:
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
if nameparagenrbase == attr:
if nameparagenrbase.startswith('numbelem'):
try:
indxpopltemp = int(nameparagenrbase[-1])
initnumbelem = getattr(gdat, 'initnumbelempop%d' % indxpopltemp)
print('Initial condition for the number of elements conflicts with the state file. Defaulting to the argument...')
except:
initnumbelem = thisfile[attr][()]
gmod.this.paragenrunitfull[k] = initnumbelem
else:
gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', thisfile[attr][()], k)
if gmod.this.paragenrunitfull[k] == 0.:
print('Warning CDF is zero.')
if not np.isfinite(thisfile[attr][()]):
raise Exception('Retrieved state parameter is not finite.')
if (gmod.numbparaelem == 0 or gmod.numbparaelem > 0 and not k in gmod.indxpara.numbelem) and \
(not np.isfinite(gmod.this.paragenrunitfull[k]) or gmod.this.paragenrunitfull[k] < 0. or \
gmod.this.paragenrunitfull[k] > 1.):
raise Exception('CDF of the retrieved state parameter is bad.')
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
maxmnumbelem = gmod.maxmpara.numbelem[l]
if gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] > maxmnumbelem:
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = maxmnumbelem
if gdat.typeverb > 0:
print('Tapering off the element list...')
gmod.this.indxelemfull = []
for l in gmod.indxpopl:
gmod.this.indxelemfull.append(list(range(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]].astype(int))))
if gdat.typeverb > 0:
print('gmod.this.paragenrunitfull[gmod.indxpara.numbelem]')
print(gmod.this.paragenrunitfull[gmod.indxpara.numbelem])
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
if (gmod.this.paragenrunitfull == 0).all():
raise Exception('Bad initialization.')
if gmod.numbparaelem > 0 and gmod.this.indxparagenrfullelem is not None:
for nameparagenrelem in gmod.namepara.elem:
initcomp = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
initcomp[l] = np.empty(len(gmod.this.indxelemfull[l]))
for k in range(len(gmod.this.indxelemfull[l])):
namefiel = '%spop%d%04d' % (nameparagenrelem, l, k)
for attr in thisfile:
if namefiel == attr:
initcomp[l][k] = thisfile[namefiel][()]
setattr(gdat, 'init' + nameparagenrelem, initcomp)
initcompfromstat(gdat, gdatmodi, 'init')
thisfile.close()
else:
boolinitreco = False
if gdat.typeverb > 0:
print('Could not find the state file, %s, to initialize the sampler.' % path)
if gdat.inittype == 'refr':
if gdat.typedata == 'inpt':
for l in gmod.indxpopl:
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = gdat.refr.numbelem[l]
if gdat.typedata == 'mock':
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
# assumption: the reference names and values come from the true (mock) model
if not (gdat.inittype == 'pert' and nameparagenrbase.startswith('numbelem')) and \
nameparagenrbase in gdat.true.nameparagenrbase:
indxparatrue = np.where(gdat.true.nameparagenrbase == nameparagenrbase)[0]
gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gmodstat.paragenrscalfull[indxparatrue], k)
if gmod.numbparaelem > 0:
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
if gdat.typeverb > 1:
show_paragenrscalfull(gdat, gdatmodi)
if gmod.this.indxparagenrfullelem is not None:
print('Initializing elements from the reference element parameters...')
show_paragenrscalfull(gdat, gdatmodi)
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
show_paragenrscalfull(gdat, gdatmodi)
initcompfromstat(gdat, gdatmodi, 'refr')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
## impose user-specified individual initial values
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
if nameparagenrbase.startswith('numbelem'):
continue
if gdat.inittype == 'reco' or gdat.inittype == 'refr' or gdat.inittype == 'pert':
try:
getattr(gdat, 'init' + nameparagenrbase)
print('Conflicting initial state arguments detected, init keyword takes precedence.')
except:
pass
try:
# note: this unconditional raise short-circuits the block, so user-specified
# individual initial values are currently ignored
raise Exception('')
initvalu = getattr(gdat, 'init' + nameparagenrbase)
gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', initvalu, k)
if gdat.typeverb > 0:
print('Received initial condition for %s: %.3g' % (nameparagenrbase, initvalu))
except:
pass
## PSF
if gdat.initpsfp is not None:
print('Initializing the metamodel PSF from the provided initial state...')
if gdat.initpsfp.size != gmod.indxpara.psfp.size:
raise Exception('')
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
if k in gmod.indxpara.psfp:
gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gdat.initpsfp[k-gmod.indxpara.psfp[0]], k)
if gdat.initpsfprefr:
print('Initializing the metamodel PSF from the reference state...')
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
if k in gmod.indxpara.psfp:
gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gmod.psfpexpr[k-gmod.indxpara.psfp[0]], k)
if gdat.inittype == 'rand' or gdat.inittype == 'reco' and not boolinitreco:
if gdat.typeverb > 0:
print('Initializing from a random state...')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
if gmod.numbparaelem > 0:
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
# check the initial unit sample vector for bad entries
if gmod.numbparaelem > 0:
indxsampdiff = np.setdiff1d(gmod.indxparagenrfull, gmod.indxpara.numbelem)
if np.logical_not(np.isfinite(gmod.this.paragenrunitfull[indxsampdiff])).any():
raise Exception('')
indxsampbaddlowr = np.where((gmod.this.paragenrunitfull[indxsampdiff] <= 0.) | np.logical_not(np.isfinite(gmod.this.paragenrunitfull[indxsampdiff])))[0]
indxsampbadduppr = np.where(gmod.this.paragenrunitfull[indxsampdiff] >= 1.)[0]
indxsampbaddlowr = indxsampdiff[indxsampbaddlowr]
indxsampbadduppr = indxsampdiff[indxsampbadduppr]
else:
indxsampbaddlowr = np.where(gmod.this.paragenrunitfull <= 0.)[0]
indxsampbadduppr = np.where(gmod.this.paragenrunitfull >= 1.)[0]
indxsampbadd = np.concatenate((indxsampbaddlowr, indxsampbadduppr))
if indxsampbadd.size > 0:
print('Initial value caused unit sample vector to go outside the unit interval...')
show_paragenrscalfull(gdat, gdatmodi, indxsampshow=indxsampbadd)
gmod.this.paragenrunitfull[indxsampbadd] = np.random.rand(indxsampbadd.size)
raise Exception('')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
indxbadd = np.where(np.logical_not(np.isfinite(gmod.this.paragenrscalfull)))[0]
import argparse
import SimpleITK as sitk
import skimage
import nibabel as ni
import numpy as np
import matplotlib.pyplot as plt
import skimage.measure
from pathlib import Path
def isocont(img_arr,
mask_arr,
b_out_labeled_mask=False,
b_class_components=True,
b_use_percentile_threshold=True,
percentile_threshold=25,
maximum_threshold=10,
verbose=True):
"""
Computes a mask based on percentile/relative maximum SUV value thresholds inside all
connected components of the original mask.
Args:
img_arr: array of a PET-SUV image.
mask_arr: array of a tumor mask.
b_out_labeled_mask: Output labeled component mask, no thresholding.
b_class_components: Detected connected components and use component based threshold.
b_use_percentile_threshold: Use percentile based thresholds (otherwise relative maximum value thresholds
are used).
percentile_threshold: Set percentile (SUV value) threshold in percent.
maximum_threshold: Set relative maximum (SUV value) threshold.
verbose:
Returns: array of the new mask.
"""
maximum_threshold = float(maximum_threshold)
# Get numpy array from sitk objects.
# amask = sitk.GetArrayFromImage(mask)
# animg = sitk.GetArrayFromImage(img)
amask = mask_arr
animg = img_arr
# Classify connected image components
if b_class_components:
amask_comp, num_comp = skimage.measure.label(amask,
neighbors=None,
background=None,
return_num=True,
connectivity=None)
else:
amask_comp = amask
num_comp = 1
print(f'Detected {num_comp} connected components.')
# Create new mask based on the selected threshold.
amask_th = np.zeros_like(amask)
# Calculate SUV value thresholds for each connected component.
for comp in range(num_comp):
if verbose:
print(f'Component {comp}')
sel_comp = (amask_comp == (comp + 1))
# Get SUV values inside the selected component.
suv_values = animg[sel_comp]
suv_max = np.max(suv_values)
if verbose:
print(f'#SUV values {suv_values.shape}')
print(f'Max. SUV value: {np.max(suv_values)}')
print(f'{percentile_threshold} percentile SUV value threshold {np.percentile(suv_values, percentile_threshold)}')
print(f'Relative max. SUV value threshold ({maximum_threshold}%): {suv_max * maximum_threshold/100.0}')
if b_use_percentile_threshold:
th = np.percentile(suv_values, percentile_threshold)
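# Usage sketch (hypothetical file names; assumes the remainder of the function
# applies the per-component threshold and returns the new mask array):
#
#   img_arr = ni.load('pet_suv.nii.gz').get_fdata()
#   mask_arr = ni.load('tumor_mask.nii.gz').get_fdata()
#   new_mask = isocont(img_arr, mask_arr, b_use_percentile_threshold=True,
#                      percentile_threshold=25)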
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 29 18:13:24 2021
@author: tae-jun_yoon
"""
import numpy as np
from scipy.signal import savgol_filter
from scipy.optimize import newton
from PyOECP import References
def ListReferences():
AvailableReferences = dir(References)
for EachReference in AvailableReferences:
if '_' in EachReference and '__' not in EachReference:
print(EachReference)
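# Usage sketch: ListReferences() prints the names of the reference spectra
# defined in PyOECP.References (attributes whose names contain a single
# underscore); pass one of the printed names, as a string, to ReferenceDetail()
# defined below.
#
#   ListReferences()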
def ReferenceDetail(ReferenceName):
''' Print out detailed information about the reference spectra.
ReferenceName should be a string.
'''
import matplotlib.pyplot as plt
plt.figure(figsize=(5,5),dpi=250)
frequency = np.array([1e9])
data = eval('References.'+ReferenceName)(frequency)
minimum_frequency = data['minFREQ']
maximum_frequency = data['maxFREQ']
frequency = np.logspace(np.log10(minimum_frequency),np.log10(maximum_frequency),100)
data = eval('References.'+ReferenceName)(frequency)
epsilon = data['epsilon']
plt.semilogx(frequency,np.real(epsilon),'r')
plt.semilogx(frequency, -np.imag(epsilon))
import trimesh
from trimesh.base import Trimesh
import trimesh.creation
from trimesh.remesh import subdivide_to_size
import matplotlib.tri as mtri
import numpy as np
import torch
import quaternion
import os
from tqdm import tqdm_notebook as tqdm
from matplotlib import pyplot as plt
def plot_info(history_grad_norm, history_quad_loss,
history_smooth_loss, history_loss,
history_p_deviation, history_p_deviation_target,
history_p_deviation_mean, history_p_deviation_target_mean):
plt.figure(figsize=(10, 8))
plt.semilogy(history_grad_norm)
plt.title('Grad norm')
plt.show()
plt.figure(figsize=(10, 8))
plt.semilogy(np.array(history_quad_loss))
plt.title('Quad energy')
plt.show()
plt.figure(figsize=(10, 8))
plt.plot(np.array(history_smooth_loss))
plt.title('Smooth energy')
plt.show()
plt.figure(figsize=(10, 8))
plt.semilogy(np.array(history_loss))
plt.title('Data energy')
plt.show()
plt.figure(figsize=(10, 8))
plt.semilogy(np.array(history_p_deviation), c='b', label='from origin')
plt.semilogy(np.array(history_p_deviation_target), c='r', label='from target')
plt.legend()
plt.title('Deviation')
plt.show()
plt.figure(figsize=(10, 8))
plt.semilogy(np.array(history_p_deviation_mean), c='b', label='from origin')
plt.semilogy(np.array(history_p_deviation_target_mean), c='r', label='from target')
plt.legend()
plt.title('Mean deviation')
plt.show()
def make_M_from_tqs(t, q, s):
q = np.quaternion(q[0], q[1], q[2], q[3])
T = np.eye(4)
T[0:3, 3] = t
R = np.eye(4)
R[0:3, 0:3] = quaternion.as_rotation_matrix(q)
S = np.eye(4)
S[0:3, 0:3] = np.diag(s)
M = T.dot(R).dot(S)
return M
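# Usage sketch: the quaternion is given as (w, x, y, z); an identity rotation,
# unit scale and a translation of +1 along x yields a pure translation matrix.
#
#   M = make_M_from_tqs(t=[1.0, 0.0, 0.0], q=[1.0, 0.0, 0.0, 0.0], s=[1.0, 1.0, 1.0])
#   # M[0:3, 3] == [1.0, 0.0, 0.0]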
def two_tetrahedrons():
vertices_1 = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]])
vertices_2 = np.array([[2, 0, 0], [3, 0, 0], [2, 1, 0], [2, 0, 2]])
faces_1 = np.array([[0, 1, 2], [0, 2, 3], [0, 1, 3], [1, 2, 3],
[0, 2, 1], [0, 3, 2], [0, 3, 1], [1, 3, 2]])
faces_2 = np.array([[0, 1, 2], [0, 2, 3], [0, 1, 3], [1, 2, 3],
[0, 2, 1], [0, 3, 2], [0, 3, 1], [1, 3, 2]])
mesh_1 = Trimesh(vertices_1, faces_1)
mesh_2 = Trimesh(vertices_2, faces_2)
return mesh_1, mesh_2
def sphere(subdivisions=3, radius=1.0):
mesh = trimesh.primitives.Sphere(subdivisions=subdivisions, radius=radius)
return mesh
def plane(width=2, length=2, num_points=2500):
x = np.linspace(0, length, num=int(np.sqrt(num_points)))
y = np.linspace(0, width, num=int(np.sqrt(num_points)))
x, y = np.meshgrid(x, y)
z = np.zeros_like(x)
tri = mtri.Triangulation(x.flatten(), y.flatten())
faces = tri.triangles
faces_dual = faces[: ,[0, 2, 1]]
faces = np.vstack([faces, faces_dual])
vertices = np.array([x.flatten(), y.flatten(), z.flatten()]).T
plane = Trimesh(vertices=vertices, faces=faces)
return plane
def saddle(num_points=2500):
def f(x, y):
return x ** 2 - y ** 2
x = np.linspace(-1, 1, num=int(np.sqrt(num_points)))
y = np.linspace(-1, 1, num=int(np.sqrt(num_points)))
x, y = np.meshgrid(x, y)
z = f(x, y)
tri = mtri.Triangulation(x.flatten(), y.flatten())
faces = tri.triangles
faces_dual = faces[: ,[0, 2, 1]]
faces = np.vstack([faces, faces_dual])
vertices = np.array([x.flatten(), y.flatten(), z.flatten()]).T
saddle = Trimesh(vertices=vertices, faces=faces)
return saddle
def monkey_saddle(num_points=2500):
def f(x, y):
return x ** 3 - 3 * x * y ** 2
x = np.linspace(-1, 1, num=int(np.sqrt(num_points)))
y = np.linspace(-1, 1, num=int(np.sqrt(num_points)))
x, y = np.meshgrid(x, y)
z = f(x, y)
tri = mtri.Triangulation(x.flatten(), y.flatten())
faces = tri.triangles
faces_dual = faces[: ,[0, 2, 1]]
faces = np.vstack([faces, faces_dual])
vertices = np.array([x.flatten(), y.flatten(), z.flatten()]).T
saddle = Trimesh(vertices=vertices, faces=faces)
return saddle
def box(size=(1, 1, 1), max_edge=0.1):
box = trimesh.creation.box(extents=size)
vertices, faces = subdivide_to_size(box.vertices, box.faces, max_edge)
mesh = Trimesh(vertices, faces)
return mesh
def mesh_pcloud(points, size=0.1, color=None):
boxes = []
for point in points:
box = trimesh.creation.box(extents=(size, size, size))
box.apply_transform(translate(point - np.array([size/2, size/2, size/2])))
if color is not None:
for facet in box.facets:
box.visual.face_colors[facet] = color
boxes += [box]
boxes = trimesh.util.concatenate(boxes)
return boxes
def set_new_mesh_vertices(mesh, vertices):
mesh_new = Trimesh(vertices=vertices.copy(), faces=mesh.faces, process=False)
return mesh_new
def affine_transform(mesh, transform):
mesh_new = mesh.copy()
mesh_new.apply_transform(transform)
return mesh_new
def rot_x(angle=0):
angle = angle * np.pi / 180
return np.array([[1, 0, 0, 0],
[0, np.cos(angle), -np.sin(angle), 0],
[0, np.sin(angle), np.cos(angle), 0],
[0, 0, 0, 1]])
def rot_y(angle=0):
angle = angle * np.pi / 180
return np.array([[np.cos(angle), 0, -np.sin(angle), 0],
[0, 1, 0, 0],
[np.sin(angle), 0, np.cos(angle), 0],
[0, 0, 0, 1]])
def rot_z(angle=0):
angle = angle * np.pi / 180
return np.array([[np.cos(angle), -np.sin(angle), 0, 0],
[np.sin(angle), np.cos(angle), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
def translate(vector=(0, 0, 0)):
transform = np.eye(4, dtype=float)
transform[:3, 3] = np.array(vector)
return transform
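# Usage sketch: the 4x4 transforms above compose by matrix multiplication
# (applied right-to-left) and can be passed to affine_transform() defined
# earlier; 'mesh' stands for any Trimesh instance.
#
#   T = translate((0.5, 0.0, 0.0)) @ rot_z(90)
#   mesh_moved = affine_transform(mesh, T)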
def scale(scale=(1, 1, 1)):
transform = np.eye(4, dtype=float)
transform[0, 0] = scale[0]
transform[1, 1] = scale[1]
transform[2, 2] = scale[2]
return transform
# Copyright 2021 <NAME>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
"""
import unittest
import numpy as np
import sempler
import sempler.generators
import utlvce.utils as utils
# ---------------------------------------------------------------------
NUM_GRAPHS = 500
class PDAG_to_CPDAG_Tests(unittest.TestCase):
# Tests to ensure that the conversion from PDAG to CPDAG
# works
def test_pdag_to_dag_1(self):
# Should work
P = np.array([[0, 0, 1, 0],
[0, 0, 1, 1],
[1, 0, 0, 0],
[0, 0, 0, 0]])
A = utils.pdag_to_dag(P, debug=False)
# print(A)
true_A = P.copy()
true_A[0, 2] = 0
self.assertTrue((A == true_A).all())
def test_pdag_to_dag_2(self):
# Same as above but different index order, should work
P = np.array([[0, 0, 1, 0],
[1, 0, 0, 1],
[1, 0, 0, 0],
[0, 0, 0, 0]])
A = utils.pdag_to_dag(P, debug=False)
# print(A)
true_A = P.copy()
true_A[2, 0] = 0
self.assertTrue((A == true_A).all())
def test_pdag_to_dag_3(self):
# Should work
P = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[1, 1, 0, 0],
[0, 0, 0, 0]])
A = utils.pdag_to_dag(P, debug=False)
# print(A)
true_A1, true_A2 = P.copy(), P.copy()
true_A1[0, 2], true_A2[2, 0] = 0, 0
self.assertTrue(utils.member([true_A1, true_A2], A) is not None)
def test_pdag_to_dag_4(self):
# This PDAG does not admit a consistent extension, i.e. it
# either creates a non-existing v-structure or it induces a
# cycle
P = np.array([[0, 0, 1, 1],
[0, 0, 1, 0],
[1, 0, 0, 0],
[0, 0, 1, 0]])
try:
utils.pdag_to_dag(P, debug=False)
self.fail("Exception should have been thrown")
except ValueError as e:
print("OK:", e)
def test_pdag_to_dag_5(self):
# Fully directed PDAG should return itself
A = np.array([[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 0, 0],
[0, 0, 1, 0]])
extension = utils.pdag_to_dag(A, debug=False)
self.assertTrue(utils.is_consistent_extension(extension, A))
self.assertTrue((extension == A).all())
def test_pdag_to_dag_6(self):
# Check that the resulting extensions are indeed a consistent
# extensions
G = NUM_GRAPHS
p = 20
for i in range(G):
A = sempler.generators.dag_avg_deg(p, 3, 1, 1)
cpdag = utils.dag_to_cpdag(A)
self.assertTrue(utils.is_consistent_extension(A, cpdag))
extension = utils.pdag_to_dag(cpdag, debug=False)
is_consistent_extension = utils.is_consistent_extension(
extension, cpdag)
if not is_consistent_extension:
print("DAG\n", A)
print("CPDAG\n", cpdag)
print("Extension\n", extension)
utils.is_consistent_extension(extension, cpdag, debug=True)
# Rerun with outputs
assert (extension == utils.pdag_to_dag(
cpdag, debug=True)).all()
self.assertTrue(is_consistent_extension)
print("\nChecked PDAG to DAG conversion for %d PDAGs" % (i + 1))
def test_order_edges_preconditions(self):
A = np.array([[0, 0, 1, 1, 1], [0, 0, 1, 1, 1], [
0, 0, 0, 1, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]])
# Check that exception is thrown with pdag
pdag = A.copy()
pdag[4, 2] = 1
try:
utils.order_edges(pdag)
self.fail()
except ValueError as e:
print("OK:", e)
# Check that exception is thrown with cyclic graph
cyclic = pdag.copy()
cyclic[2, 4] = 0
try:
utils.order_edges(cyclic)
self.fail()
except ValueError as e:
print("OK:", e)
def test_order_edges_1(self):
A = np.array([[0, 0, 1, 1, 1], [0, 0, 1, 1, 1], [
0, 0, 0, 1, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]])
ordered = utils.order_edges(A)
# print(ordered)
# Ground truth derived by hand for the order [1,0,2,3,4]
truth = np.array([[0, 0, 9, 6, 2],
[0, 0, 8, 5, 1],
[0, 0, 0, 7, 3],
[0, 0, 0, 0, 4],
[0, 0, 0, 0, 0]])
self.assertTrue((ordered >= 0).all())
self.assertTrue((ordered == truth).all())
def test_order_edges_2(self):
G = NUM_GRAPHS
p = 20
for i in range(G):
A = sempler.generators.dag_avg_deg(p, 3, 1, 1)
ordered = utils.order_edges(A)
no_edges = (A != 0).sum()
self.assertEqual(sorted(ordered[ordered != 0]), list(
range(1, no_edges + 1)))
print("\nChecked valid ordering for %d DAGs" % (i + 1))
def test_label_edges_preconditions(self):
A = np.array([[0, 0, 1, 1, 1], [0, 0, 1, 1, 1], [
0, 0, 0, 1, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]])
# Check that exception is thrown with pdag
pdag = A.copy()
pdag[4, 2] = 1
try:
utils.order_edges(pdag)
self.fail()
except ValueError as e:
print("OK:", e)
# Check that exception is thrown with cyclic graph
cyclic = pdag.copy()
cyclic[2, 4] = 0
try:
utils.order_edges(cyclic)
self.fail()
except ValueError as e:
print("OK:", e)
# Check that if ordering is invalid an exception is thrown
try:
utils.label_edges(A)
self.fail()
except ValueError as e:
print("OK:", e)
# Same same, but different :)
ordered = utils.order_edges(A)
ordered[0, 4] = 1
try:
utils.label_edges(ordered)
self.fail()
except ValueError as e:
print("OK:", e)
def test_label_edges_1(self):
# For a hand-picked example
A = np.array([[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
ordered = utils.order_edges(A)
truth = np.array([[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 1],
[0, 0, 0, 0, -1],
[0, 0, 0, 0, 0]])
labelled = utils.label_edges(ordered)
self.assertTrue((labelled == truth).all())
def test_label_edges_2(self):
# With randomly generated DAGs
# np.random.seed(42)
G = NUM_GRAPHS
p = 20
for i in range(G):
A = sempler.generators.dag_avg_deg(p, 3, 1, 1)
# Construct expected output
cpdag = utils.dag_to_cpdag(A)
undirected = np.logical_and(cpdag, cpdag.T)
truth = A.copy()
truth[np.logical_and(truth, undirected)] = -1
# Run and assert
ordered = utils.order_edges(A)
labelled = utils.label_edges(ordered)
self.assertTrue((labelled == truth).all())
print("\nChecked edge labelling for %d DAGs" % (i + 1))
def test_dag_to_cpdag(self):
# Test by checking that applying the whole pipeline to a CPDAG
# returns the same CPDAG
G = NUM_GRAPHS
p = 25
for i in range(G):
A = sempler.generators.dag_avg_deg(p, 4, 1, 1)
truth = utils.dag_to_cpdag(A)
# Run and assert
cpdag = utils.dag_to_cpdag(A)
self.assertTrue((truth == cpdag).all())
print("\nChecked DAG to CPDAG conversion for %d DAGs" % (i + 1))
def test_cpdag_to_cpdag(self):
# Test by checking that applying the whole pipeline to a CPDAG
# returns the same CPDAG
G = NUM_GRAPHS
p = 30
for i in range(G):
A = sempler.generators.dag_avg_deg(p, 3, 1, 1)
cpdag = utils.dag_to_cpdag(A)
# Run and assert
output = utils.pdag_to_cpdag(cpdag)
self.assertTrue((output == cpdag).all())
print("\nChecked CPDAG to CPDAG conversion for %d CPDAGs" % (i + 1))
def test_pdag_to_cpdag(self):
# Now construct PDAGs whose extensions belong to the true MEC,
# and test that the true CPDAG is recovered
G = NUM_GRAPHS
p = 32
for g in range(G):
A = sempler.generators.dag_avg_deg(p, 3, 1, 1)
# Construct PDAG by undirecting random edges which do not
# belong to a v-structure.
# NOTE: I'm proceeding in this awkward way to avoid
# using functions from the pipeline I'm testing,
# i.e. utils.order_edges and utils.label_edges
pdag = A.copy()
mask_vstructs = np.zeros_like(A)
"""
## pyart radar object
pyart.core.radar
================
A general central radial scanning (or dwelling) instrument class.
.. autosummary::
:toctree: generated/
_rays_per_sweep_data_factory
_gate_data_factory
_gate_lon_lat_data_factory
_gate_altitude_data_factory
.. autosummary::
:toctree: generated/
:template: dev_template.rst
Radar
"""
# the code for Radar Object in this file were adapted from pyart by <NAME>. & <NAME>.
# https://github.com/ARM-DOE/pyart
from __future__ import print_function
import numpy as np
import sys
from ..configure.pyart_config import get_metadata
from ..configure.pyart_lazydict import LazyLoadDict
from .transforms import antenna_vectors_to_cartesian, cartesian_to_geographic
class Radar(object):
"""
A class for storing antenna coordinate radar data.
The structure of the Radar class is based on the CF/Radial Data file
format. Global attributes and variables (section 4.1 and 4.3) are
represented as a dictionary in the metadata attribute. Other required and
optional variables are represented as dictionaries in a attribute with the
same name as the variable in the CF/Radial standard. When a optional
attribute not present the attribute has a value of None. The data for a
given variable is stored in the dictionary under the 'data' key. Moment
field data is stored as a dictionary of dictionaries in the fields
attribute. Sub-convention variables are stored as a dictionary of
dictionaries under the meta_group attribute.
Refer to the attribute section for information on the parameters.
Attributes
----------
time : dict
Time at the center of each ray.
range : dict
Range to the center of each gate (bin).
fields : dict of dicts
Moment fields.
metadata : dict
Metadata describing the instrument and data.
scan_type : str
Type of scan, one of 'ppi', 'rhi', 'sector' or 'other'. If the scan
volume contains multiple sweep modes this should be 'other'.
latitude : dict
Latitude of the instrument.
longitude : dict
Longitude of the instrument.
altitude : dict
Altitude of the instrument, above sea level.
altitude_agl : dict or None
Altitude of the instrument above ground level. If not provided this
attribute is set to None, indicating this parameter is not available.
sweep_number : dict
The number of the sweep in the volume scan, 0-based.
sweep_mode : dict
Sweep mode for each mode in the volume scan.
fixed_angle : dict
Target angle for the sweep. Azimuth angle in RHI modes, elevation
angle in all other modes.
sweep_start_ray_index : dict
Index of the first ray in each sweep relative to the start of the
volume, 0-based.
sweep_end_ray_index : dict
Index of the last ray in each sweep relative to the start of the
volume, 0-based.
rays_per_sweep : LazyLoadDict
Number of rays in each sweep. The data key of this attribute is
created upon first access from the data in the sweep_start_ray_index and
sweep_end_ray_index attributes. If the sweep locations need to be
modified, do this prior to accessing this attribute or use
:py:func:`init_rays_per_sweep` to reset the attribute.
target_scan_rate : dict or None
Intended scan rate for each sweep. If not provided this attribute is
set to None, indicating this parameter is not available.
rays_are_indexed : dict or None
Indication of whether ray angles are indexed to a regular grid in
each sweep. If not provided this attribute is set to None, indicating
ray angle spacing is not determined.
ray_angle_res : dict or None
If rays_are_indexed is not None, this provides the angular resolution
of the grid. If not provided or available this attribute is set to
None.
azimuth : dict
Azimuth of antenna, relative to true North. Azimuth angles are
recommended to be expressed in the range of [0, 360], but other
representations are not forbidden.
elevation : dict
Elevation of antenna, relative to the horizontal plane. Elevation
angles are recommended to be expressed in the range of [-180, 180],
but other representations are not forbidden.
gate_x, gate_y, gate_z : LazyLoadDict
Location of each gate in a Cartesian coordinate system assuming a
standard atmosphere with a 4/3 Earth's radius model. The data keys of
these attributes are created upon first access from the data in the
range, azimuth and elevation attributes. If these attributes are
changed use :py:func:`init_gate_x_y_z` to reset.
gate_longitude, gate_latitude : LazyLoadDict
Geographic location of each gate. The projection parameter(s) defined
in the `projection` attribute are used to perform an inverse map
projection from the Cartesian gate locations relative to the radar
location to longitudes and latitudes. If these attributes are changed
use :py:func:`init_gate_longitude_latitude` to reset the attributes.
projection : dic or str
Projection parameters defining the map projection used to transform
from Cartesian to geographic coordinates. The default dictionary sets
the 'proj' key to 'pyart_aeqd' indicating that the native Py-ART
azimuthal equidistant projection is used. This can be modified to
specify a valid pyproj.Proj projparams dictionary or string.
The special key '_include_lon_0_lat_0' is removed when interpreting
this dictionary. If this key is present and set to True, which is
required when proj='pyart_aeqd', then the radar longitude and
latitude will be added to the dictionary as 'lon_0' and 'lat_0'.
gate_altitude : LazyLoadDict
The altitude of each radar gate as calculated from the altitude of the
radar and the Cartesian z location of each gate. If this attribute
is changed use :py:func:`init_gate_altitude` to reset the attribute.
scan_rate : dict or None
Actual antenna scan rate. If not provided this attribute is set to
None, indicating this parameter is not available.
antenna_transition : dict or None
Flag indicating if the antenna is in transition, 1 = yes, 0 = no.
If not provided this attribute is set to None, indicating this
parameter is not available.
rotation : dict or None
The rotation angle of the antenna. The angle about the aircraft
longitudinal axis for a vertically scanning radar.
tilt : dict or None
The tilt angle with respect to the plane orthogonal (Z-axis) to
aircraft longitudinal axis.
roll : dict or None
The roll angle of platform, for aircraft right wing down is positive.
drift : dict or None
Drift angle of antenna, the angle between heading and track.
heading : dict or None
Heading (compass) angle, clockwise from north.
pitch : dict or None
Pitch angle of antenna, for aircraft nose up is positive.
georefs_applied : dict or None
Indicates whether the variables have had georeference calculation
        applied, leading to Earth-centric azimuth and elevation angles.
instrument_parameters : dict of dicts or None
        Instrument parameters. If not provided this attribute is set to None,
        indicating these parameters are not available. This dictionary also
includes variables in the radar_parameters CF/Radial subconvention.
radar_calibration : dict of dicts or None
Instrument calibration parameters. If not provided this attribute is
        set to None, indicating these parameters are not available.
ngates : int
Number of gates (bins) in a ray.
nrays : int
Number of rays in the volume.
nsweeps : int
        Number of sweeps in the volume.
"""
def __init__(self, time, _range, fields, metadata, scan_type,
latitude, longitude, altitude,
sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index,
sweep_end_ray_index,
azimuth, elevation,
altitude_agl=None,
target_scan_rate=None, rays_are_indexed=None,
ray_angle_res=None,
scan_rate=None, antenna_transition=None,
instrument_parameters=None,
radar_calibration=None,
rotation=None, tilt=None, roll=None, drift=None, heading=None,
pitch=None, georefs_applied=None,
):
if 'calendar' not in time:
time['calendar'] = 'gregorian'
self.time = time
self.range = _range
self.fields = fields
self.metadata = metadata
self.scan_type = scan_type
self.latitude = latitude
self.longitude = longitude
self.altitude = altitude
self.altitude_agl = altitude_agl # optional
self.sweep_number = sweep_number
self.sweep_mode = sweep_mode
self.fixed_angle = fixed_angle
self.sweep_start_ray_index = sweep_start_ray_index
self.sweep_end_ray_index = sweep_end_ray_index
self.target_scan_rate = target_scan_rate # optional
self.rays_are_indexed = rays_are_indexed # optional
self.ray_angle_res = ray_angle_res # optional
self.azimuth = azimuth
self.elevation = elevation
self.scan_rate = scan_rate # optional
self.antenna_transition = antenna_transition # optional
self.rotation = rotation # optional
self.tilt = tilt # optional
self.roll = roll # optional
self.drift = drift # optional
self.heading = heading # optional
self.pitch = pitch # optional
self.georefs_applied = georefs_applied # optional
self.instrument_parameters = instrument_parameters # optional
self.radar_calibration = radar_calibration # optional
self.ngates = len(_range['data'])
self.nrays = len(time['data'])
self.nsweeps = len(sweep_number['data'])
self.projection = {'proj': 'pyart_aeqd', '_include_lon_0_lat_0': True}
        # initialize attributes with lazy load dictionaries
self.init_rays_per_sweep()
self.init_gate_x_y_z()
self.init_gate_longitude_latitude()
self.init_gate_altitude()
def __getstate__(self):
""" Return object's state which can be pickled. """
state = self.__dict__.copy() # copy the objects state
        # Remove unpicklable entries (those which are lazily loaded).
del state['rays_per_sweep']
del state['gate_x']
del state['gate_y']
del state['gate_z']
del state['gate_longitude']
del state['gate_latitude']
del state['gate_altitude']
return state
def __setstate__(self, state):
""" Restore unpicklable entries from pickled object. """
self.__dict__.update(state)
self.init_rays_per_sweep()
self.init_gate_x_y_z()
self.init_gate_longitude_latitude()
self.init_gate_altitude()
# Attribute init/reset method
def init_rays_per_sweep(self):
""" Initialize or reset the rays_per_sweep attribute. """
lazydic = LazyLoadDict(get_metadata('rays_per_sweep'))
lazydic.set_lazy('data', _rays_per_sweep_data_factory(self))
self.rays_per_sweep = lazydic
def init_gate_x_y_z(self):
""" Initialize or reset the gate_{x, y, z} attributes. """
gate_x = LazyLoadDict(get_metadata('gate_x'))
gate_x.set_lazy('data', _gate_data_factory(self, 0))
self.gate_x = gate_x
gate_y = LazyLoadDict(get_metadata('gate_y'))
gate_y.set_lazy('data', _gate_data_factory(self, 1))
self.gate_y = gate_y
gate_z = LazyLoadDict(get_metadata('gate_z'))
gate_z.set_lazy('data', _gate_data_factory(self, 2))
self.gate_z = gate_z
def init_gate_longitude_latitude(self):
"""
Initialize or reset the gate_longitude and gate_latitude attributes.
"""
gate_longitude = LazyLoadDict(get_metadata('gate_longitude'))
gate_longitude.set_lazy('data', _gate_lon_lat_data_factory(self, 0))
self.gate_longitude = gate_longitude
gate_latitude = LazyLoadDict(get_metadata('gate_latitude'))
gate_latitude.set_lazy('data', _gate_lon_lat_data_factory(self, 1))
self.gate_latitude = gate_latitude
def init_gate_altitude(self):
""" Initialize the gate_altitude attribute. """
gate_altitude = LazyLoadDict(get_metadata('gate_altitude'))
gate_altitude.set_lazy('data', _gate_altitude_data_factory(self))
self.gate_altitude = gate_altitude
# private functions for checking limits, etc.
def _check_sweep_in_range(self, sweep):
""" Check that a sweep number is in range. """
if sweep < 0 or sweep >= self.nsweeps:
raise IndexError('Sweep out of range: ', sweep)
return
# public check functions
def check_field_exists(self, field_name):
"""
Check that a field exists in the fields dictionary.
If the field does not exist raise a KeyError.
Parameters
----------
field_name : str
Name of field to check.
"""
if field_name not in self.fields:
raise KeyError('Field not available: ' + field_name)
return
# Iterators
def iter_start(self):
""" Return an iterator over the sweep start indices. """
return (s for s in self.sweep_start_ray_index['data'])
def iter_end(self):
""" Return an iterator over the sweep end indices. """
return (s for s in self.sweep_end_ray_index['data'])
def iter_start_end(self):
""" Return an iterator over the sweep start and end indices. """
return ((s, e) for s, e in zip(self.iter_start(), self.iter_end()))
def iter_slice(self):
""" Return an iterator which returns sweep slice objects. """
return (slice(s, e+1) for s, e in self.iter_start_end())
def iter_field(self, field_name):
""" Return an iterator which returns sweep field data. """
self.check_field_exists(field_name)
return (self.fields[field_name]['data'][s] for s in self.iter_slice())
def iter_azimuth(self):
""" Return an iterator which returns sweep azimuth data. """
return (self.azimuth['data'][s] for s in self.iter_slice())
def iter_elevation(self):
""" Return an iterator which returns sweep elevation data. """
return (self.elevation['data'][s] for s in self.iter_slice())
# get methods
def get_start(self, sweep):
""" Return the starting ray index for a given sweep. """
self._check_sweep_in_range(sweep)
return self.sweep_start_ray_index['data'][sweep]
def get_end(self, sweep):
""" Return the ending ray for a given sweep. """
self._check_sweep_in_range(sweep)
return self.sweep_end_ray_index['data'][sweep]
def get_start_end(self, sweep):
""" Return the starting and ending ray for a given sweep. """
return self.get_start(sweep), self.get_end(sweep)
def get_slice(self, sweep):
""" Return a slice for selecting rays for a given sweep. """
start, end = self.get_start_end(sweep)
return slice(start, end+1)
def get_field(self, sweep, field_name, copy=False):
"""
Return the field data for a given sweep.
When used with :py:func:`get_gate_x_y_z` this method can be used to
obtain the data needed for plotting a radar field with the correct
spatial context.
Parameters
----------
sweep : int
Sweep number to retrieve data for, 0 based.
field_name : str
Name of the field from which data should be retrieved.
copy : bool, optional
True to return a copy of the data. False, the default, returns
a view of the data (when possible), changing this data will
change the data in the underlying Radar object.
Returns
-------
data : array
Array containing data for the requested sweep and field.
"""
self.check_field_exists(field_name)
s = self.get_slice(sweep)
data = self.fields[field_name]['data'][s]
if copy:
return data.copy()
else:
return data
def get_azimuth(self, sweep, copy=False):
"""
Return an array of azimuth angles for a given sweep.
Parameters
----------
sweep : int
Sweep number to retrieve data for, 0 based.
copy : bool, optional
True to return a copy of the azimuths. False, the default, returns
a view of the azimuths (when possible), changing this data will
change the data in the underlying Radar object.
Returns
-------
azimuths : array
Array containing the azimuth angles for a given sweep.
"""
s = self.get_slice(sweep)
azimuths = self.azimuth['data'][s]
if copy:
return azimuths.copy()
else:
return azimuths
def get_elevation(self, sweep, copy=False):
"""
Return an array of elevation angles for a given sweep.
Parameters
----------
sweep : int
Sweep number to retrieve data for, 0 based.
copy : bool, optional
True to return a copy of the elevations. False, the default,
returns a view of the elevations (when possible), changing this
data will change the data in the underlying Radar object.
Returns
-------
azimuths : array
Array containing the elevation angles for a given sweep.
"""
s = self.get_slice(sweep)
elevation = self.elevation['data'][s]
if copy:
return elevation.copy()
else:
return elevation
def get_gate_x_y_z(self, sweep, edges=False, filter_transitions=False):
"""
Return the x, y and z gate locations in meters for a given sweep.
With the default parameter this method returns the same data as
contained in the gate_x, gate_y and gate_z attributes but this method
performs the gate location calculations only for the specified sweep
and therefore is more efficient than accessing this data through these
        attributes.
When used with :py:func:`get_field` this method can be used to obtain
the data needed for plotting a radar field with the correct spatial
context.
Parameters
----------
sweep : int
Sweep number to retrieve gate locations from, 0 based.
edges : bool, optional
True to return the locations of the gate edges calculated by
interpolating between the range, azimuths and elevations.
False (the default) will return the locations of the gate centers
with no interpolation.
filter_transitions : bool, optional
True to remove rays where the antenna was in transition between
sweeps. False will include these rays. No rays will be removed
if the antenna_transition attribute is not available (set to None).
Returns
-------
x, y, z : 2D array
Array containing the x, y and z, distances from the radar in
meters for the center (or edges) for all gates in the sweep.
"""
azimuths = self.get_azimuth(sweep)
elevations = self.get_elevation(sweep)
if filter_transitions and self.antenna_transition is not None:
sweep_slice = self.get_slice(sweep)
valid = self.antenna_transition['data'][sweep_slice] == 0
azimuths = azimuths[valid]
elevations = elevations[valid]
return antenna_vectors_to_cartesian(
self.range['data'], azimuths, elevations, edges=edges)
def get_gate_lat_lon_alt(self, sweep, reset_gate_coords=False,
filter_transitions=False):
"""
Return the longitude, latitude and altitude gate locations.
Longitude and latitude are in degrees and altitude in meters.
With the default parameter this method returns the same data as
contained in the gate_latitude, gate_longitude and gate_altitude
attributes but this method performs the gate location calculations
only for the specified sweep and therefore is more efficient than
        accessing this data through these attributes. If the coordinates have
        changed at all, please use the reset_gate_coords parameter.
Parameters
----------
sweep : int
Sweep number to retrieve gate locations from, 0 based.
reset_gate_coords : bool, optional
Optional to reset the gate latitude, gate longitude and gate
altitude attributes before using them in this function. This
is useful when the geographic coordinates have changed and gate
latitude, gate longitude and gate altitude need to be reset.
filter_transitions : bool, optional
True to remove rays where the antenna was in transition between
sweeps. False will include these rays. No rays will be removed
if the antenna_transition attribute is not available (set to None).
Returns
-------
lat, lon, alt : 2D array
Array containing the latitude, longitude and altitude,
for all gates in the sweep.
"""
s = self.get_slice(sweep)
if reset_gate_coords:
gate_latitude = LazyLoadDict(get_metadata('gate_latitude'))
gate_latitude.set_lazy('data', _gate_lon_lat_data_factory(self, 1))
self.gate_latitude = gate_latitude
gate_longitude = LazyLoadDict(get_metadata('gate_longitude'))
gate_longitude.set_lazy('data', _gate_lon_lat_data_factory(self, 0))
self.gate_longitude = gate_longitude
gate_altitude = LazyLoadDict(get_metadata('gate_altitude'))
gate_altitude.set_lazy('data', _gate_altitude_data_factory(self))
self.gate_altitude = gate_altitude
lat = self.gate_latitude['data'][s]
lon = self.gate_longitude['data'][s]
alt = self.gate_altitude['data'][s]
if filter_transitions and self.antenna_transition is not None:
valid = self.antenna_transition['data'][s] == 0
lat = lat[valid]
lon = lon[valid]
alt = alt[valid]
return lat, lon, alt
def get_nyquist_vel(self, sweep, check_uniform=True):
"""
Return the Nyquist velocity in meters per second for a given sweep.
        Raises a LookupError if the Nyquist velocity is not available. An
Exception is raised if the velocities are not uniform in the sweep
unless check_uniform is set to False.
Parameters
----------
sweep : int
Sweep number to retrieve data for, 0 based.
check_uniform : bool
            True to perform a check that the Nyquist velocities are uniform in
            the sweep; False will skip this check and
return the velocity of the first ray in the sweep.
Returns
-------
nyquist_velocity : float
Array containing the Nyquist velocity in m/s for a given sweep.
"""
s = self.get_slice(sweep)
try:
nyq_vel = self.instrument_parameters['nyquist_velocity']['data'][s]
        except (TypeError, KeyError):
raise LookupError('Nyquist velocity unavailable')
if check_uniform:
if np.any(nyq_vel != nyq_vel[0]):
raise Exception('Nyquist velocities are not uniform in sweep')
return float(nyq_vel[0])
# Methods
def info(self, level='standard', out=sys.stdout):
"""
Print information on radar.
Parameters
----------
level : {'compact', 'standard', 'full', 'c', 's', 'f'}, optional
Level of information on radar object to print, compact is
minimal information, standard more and full everything.
out : file-like, optional
Stream to direct output to, default is to print information
to standard out (the screen).
"""
if level == 'c':
level = 'compact'
elif level == 's':
level = 'standard'
elif level == 'f':
level = 'full'
if level not in ['standard', 'compact', 'full']:
raise ValueError('invalid level parameter')
self._dic_info('altitude', level, out)
self._dic_info('altitude_agl', level, out)
self._dic_info('antenna_transition', level, out)
self._dic_info('azimuth', level, out)
self._dic_info('elevation', level, out)
print('fields:', file=out)
for field_name, field_dic in self.fields.items():
self._dic_info(field_name, level, out, field_dic, 1)
self._dic_info('fixed_angle', level, out)
if self.instrument_parameters is None:
print('instrument_parameters: None', file=out)
else:
print('instrument_parameters:', file=out)
for name, dic in self.instrument_parameters.items():
self._dic_info(name, level, out, dic, 1)
self._dic_info('latitude', level, out)
self._dic_info('longitude', level, out)
print('nsweeps:', self.nsweeps, file=out)
print('ngates:', self.ngates, file=out)
print('nrays:', self.nrays, file=out)
if self.radar_calibration is None:
print('radar_calibration: None', file=out)
else:
print('radar_calibration:', file=out)
for name, dic in self.radar_calibration.items():
self._dic_info(name, level, out, dic, 1)
self._dic_info('range', level, out)
self._dic_info('scan_rate', level, out)
print('scan_type:', self.scan_type, file=out)
self._dic_info('sweep_end_ray_index', level, out)
self._dic_info('sweep_mode', level, out)
self._dic_info('sweep_number', level, out)
self._dic_info('sweep_start_ray_index', level, out)
self._dic_info('target_scan_rate', level, out)
self._dic_info('time', level, out)
# Airborne radar parameters
if self.rotation is not None:
self._dic_info('rotation', level, out)
if self.tilt is not None:
self._dic_info('tilt', level, out)
if self.roll is not None:
self._dic_info('roll', level, out)
if self.drift is not None:
self._dic_info('drift', level, out)
if self.heading is not None:
self._dic_info('heading', level, out)
if self.pitch is not None:
self._dic_info('pitch', level, out)
if self.georefs_applied is not None:
self._dic_info('georefs_applied', level, out)
# always print out all metadata last
self._dic_info('metadata', 'full', out)
def _dic_info(self, attr, level, out, dic=None, ident_level=0):
""" Print information on a dictionary attribute. """
if dic is None:
dic = getattr(self, attr)
ilvl0 = '\t' * ident_level
ilvl1 = '\t' * (ident_level + 1)
if dic is None:
print(str(attr) + ': None', file=out)
return
# make a string summary of the data key if it exists.
if 'data' not in dic:
d_str = 'Missing'
elif not isinstance(dic['data'], np.ndarray):
d_str = '<not a ndarray>'
else:
data = dic['data']
t = (data.dtype, data.shape)
d_str = '<ndarray of type: %s and shape: %s>' % t
# compact, only data summary
if level == 'compact':
print(ilvl0 + str(attr) + ':', d_str, file=out)
# standard, all keys, only summary for data
elif level == 'standard':
print(ilvl0 + str(attr) + ':', file=out)
print(ilvl1 + 'data:', d_str, file=out)
for key, val in dic.items():
if key == 'data':
continue
print(ilvl1 + key + ':', val, file=out)
# full, all keys, full data
elif level == 'full':
print(str(attr) + ':', file=out)
if 'data' in dic:
print(ilvl1 + 'data:', dic['data'], file=out)
for key, val in dic.items():
if key == 'data':
continue
print(ilvl1 + key + ':', val, file=out)
return
def add_field(self, field_name, dic, replace_existing=False):
"""
Add a field to the object.
Parameters
----------
field_name : str
Name of the field to add to the dictionary of fields.
dic : dict
            Dictionary containing field data and metadata.
replace_existing : bool, optional
True to replace the existing field with key field_name if it
            exists, losing any existing data. False will raise a ValueError
when the field already exists.
"""
# check that the field dictionary to add is valid
if field_name in self.fields and replace_existing is False:
err = 'A field with name: %s already exists' % (field_name)
raise ValueError(err)
if 'data' not in dic:
raise KeyError("dic must contain a 'data' key")
if dic['data'].shape != (self.nrays, self.ngates):
t = (self.nrays, self.ngates)
err = "'data' has invalid shape, should be (%i, %i)" % t
raise ValueError(err)
# add the field
self.fields[field_name] = dic
return
def add_field_like(self, existing_field_name, field_name, data,
replace_existing=False):
"""
        Add a field to the object with metadata from an existing field.
Note that the data parameter is not copied by this method.
If data refers to a 'data' array from an existing field dictionary, a
copy should be made within or prior to using this method. If this is
not done the 'data' key in both field dictionaries will point to the
same NumPy array and modification of one will change the second. To
copy NumPy arrays use the copy() method. See the Examples section
for how to create a copy of the 'reflectivity' field as a field named
'reflectivity_copy'.
Parameters
----------
existing_field_name : str
Name of an existing field to take metadata from when adding
the new field to the object.
field_name : str
Name of the field to add to the dictionary of fields.
data : array
Field data. A copy of this data is not made, see the note above.
replace_existing : bool, optional
True to replace the existing field with key field_name if it
            exists, losing any existing data. False will raise a ValueError
when the field already exists.
Examples
--------
>>> radar.add_field_like('reflectivity', 'reflectivity_copy',
... radar.fields['reflectivity']['data'].copy())
"""
if existing_field_name not in self.fields:
err = 'field %s does not exist in object' % (existing_field_name)
raise ValueError(err)
dic = {}
for k, v in self.fields[existing_field_name].items():
if k != 'data':
dic[k] = v
dic['data'] = data
return self.add_field(field_name, dic,
replace_existing=replace_existing)
def extract_sweeps(self, sweeps):
"""
        Create a new radar that contains only the data from the selected sweeps.
Parameters
----------
sweeps : array_like
Sweeps (0-based) to include in new Radar object.
Returns
-------
radar : Radar
Radar object which contains a copy of data from the selected
sweeps.
"""
# parse and verify parameters
sweeps = np.array(sweeps, dtype='int32')
if np.any(sweeps > (self.nsweeps - 1)):
raise ValueError('invalid sweeps indices in sweeps parameter')
if np.any(sweeps < 0):
            raise ValueError('sweep indices must be non-negative')
def mkdic(dic, select):
""" Make a dictionary, selecting out select from data key """
if dic is None:
return None
d = dic.copy()
if 'data' in d and select is not None:
d['data'] = d['data'][select].copy()
return d
        # create an array of ray indices which selects the rays in the chosen
        # sweeps, as well as the number of rays per sweep.
ray_count = (self.sweep_end_ray_index['data'] -
self.sweep_start_ray_index['data'] + 1)[sweeps]
ssri = self.sweep_start_ray_index['data'][sweeps]
rays = np.concatenate(
[range(s, s+e) for s, e in zip(ssri, ray_count)]).astype('int32')
# radar location attribute dictionary selector
if len(self.altitude['data']) == 1:
loc_select = None
else:
loc_select = sweeps
# create new dictionaries
time = mkdic(self.time, rays)
_range = mkdic(self.range, None)
fields = {}
for field_name, dic in self.fields.items():
fields[field_name] = mkdic(dic, rays)
metadata = mkdic(self.metadata, None)
scan_type = str(self.scan_type)
latitude = mkdic(self.latitude, loc_select)
longitude = mkdic(self.longitude, loc_select)
altitude = mkdic(self.altitude, loc_select)
altitude_agl = mkdic(self.altitude_agl, loc_select)
sweep_number = mkdic(self.sweep_number, sweeps)
sweep_mode = mkdic(self.sweep_mode, sweeps)
fixed_angle = mkdic(self.fixed_angle, sweeps)
sweep_start_ray_index = mkdic(self.sweep_start_ray_index, None)
sweep_start_ray_index['data'] = np.cumsum(
np.append([0], ray_count[:-1]), dtype='int32')
sweep_end_ray_index = mkdic(self.sweep_end_ray_index, None)
sweep_end_ray_index['data'] = np.cumsum(ray_count, dtype='int32') - 1
target_scan_rate = mkdic(self.target_scan_rate, sweeps)
azimuth = mkdic(self.azimuth, rays)
elevation = mkdic(self.elevation, rays)
scan_rate = mkdic(self.scan_rate, rays)
antenna_transition = mkdic(self.antenna_transition, rays)
# instrument_parameters
        # Filter the instrument_parameters dictionary based on the size of the
        # leading dimension; this might not always be correct.
if self.instrument_parameters is None:
instrument_parameters = None
else:
instrument_parameters = {}
for key, dic in self.instrument_parameters.items():
if dic['data'].ndim != 0:
dim0_size = dic['data'].shape[0]
else:
dim0_size = -1
if dim0_size == self.nsweeps:
fdic = mkdic(dic, sweeps)
elif dim0_size == self.nrays:
fdic = mkdic(dic, rays)
else: # keep everything
fdic = mkdic(dic, None)
instrument_parameters[key] = fdic
# radar_calibration
        # copy all fields in radar_calibration as-is except for
# r_calib_index which we filter based upon time. This might
# leave some indices in the "r_calib" dimension not referenced in
# the r_calib_index array.
if self.radar_calibration is None:
radar_calibration = None
else:
radar_calibration = {}
for key, dic in self.radar_calibration.items():
if key == 'r_calib_index':
radar_calibration[key] = mkdic(dic, rays)
else:
radar_calibration[key] = mkdic(dic, None)
return Radar(time, _range, fields, metadata, scan_type,
latitude, longitude, altitude,
sweep_number, sweep_mode, fixed_angle,
sweep_start_ray_index, sweep_end_ray_index,
azimuth, elevation,
altitude_agl=altitude_agl,
target_scan_rate=target_scan_rate,
scan_rate=scan_rate,
antenna_transition=antenna_transition,
instrument_parameters=instrument_parameters,
radar_calibration=radar_calibration)
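# Illustrative usage sketch (not part of the original class): pulling a copy of
# selected sweeps out of an already populated Radar instance. Wrapped in a
# function so nothing runs at import time; `radar` is an assumed existing object.
def _example_extract_sweeps(radar, sweeps=(0, 1)):
    """ Return a Radar copy of `sweeps` plus its per-sweep ray counts. """
    subset = radar.extract_sweeps(list(sweeps))
    # ray indices are re-based in the copy, so its first sweep starts at ray 0
    rays_per_sweep = [s.stop - s.start for s in subset.iter_slice()]
    return subset, rays_per_sweep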
def _rays_per_sweep_data_factory(radar):
""" Return a function which returns the number of rays per sweep. """
def _rays_per_sweep_data():
""" The function which returns the number of rays per sweep. """
return (radar.sweep_end_ray_index['data'] -
radar.sweep_start_ray_index['data'] + 1)
return _rays_per_sweep_data
def _gate_data_factory(radar, coordinate):
""" Return a function which returns the Cartesian locations of gates. """
def _gate_data():
""" The function which returns the Cartesian locations of gates. """
ranges = radar.range['data']
azimuths = radar.azimuth['data']
elevations = radar.elevation['data']
cartesian_coords = antenna_vectors_to_cartesian(
ranges, azimuths, elevations, edges=False)
# load x, y, and z data except for the coordinate in question
if coordinate != 0:
radar.gate_x['data'] = cartesian_coords[0]
if coordinate != 1:
radar.gate_y['data'] = cartesian_coords[1]
if coordinate != 2:
radar.gate_z['data'] = cartesian_coords[2]
return cartesian_coords[coordinate]
return _gate_data
def _gate_lon_lat_data_factory(radar, coordinate):
""" Return a function which returns the geographic locations of gates. """
def _gate_lon_lat_data():
""" The function which returns the geographic locations gates. """
x = radar.gate_x['data']
y = radar.gate_y['data']
projparams = radar.projection.copy()
if projparams.pop('_include_lon_0_lat_0', False):
projparams['lon_0'] = radar.longitude['data'][0]
projparams['lat_0'] = radar.latitude['data'][0]
geographic_coords = cartesian_to_geographic(x, y, projparams)
# set the other geographic coordinate
if coordinate == 0:
radar.gate_latitude['data'] = geographic_coords[1]
else:
radar.gate_longitude['data'] = geographic_coords[0]
return geographic_coords[coordinate]
return _gate_lon_lat_data
def _gate_altitude_data_factory(radar):
""" Return a function which returns the gate altitudes. """
def _gate_altitude_data():
""" The function which returns the gate altitudes. """
try:
return radar.altitude['data'] + radar.gate_z['data']
except ValueError:
            return np.mean(radar.altitude['data']) + radar.gate_z['data']
    return _gate_altitude_data
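# Illustrative sketch (not part of the original module): fetching one sweep's
# field values together with the matching gate locations, as the get_field and
# get_gate_x_y_z docstrings suggest for plotting. The `radar` object and the
# 'reflectivity' field name are assumptions; nothing here runs at import time.
def _example_sweep_geometry(radar, sweep=0, field_name='reflectivity'):
    data = radar.get_field(sweep, field_name)
    x, y, z = radar.get_gate_x_y_z(sweep)
    # data, x, y and z all share the shape (rays_in_sweep, ngates)
    return data, x, y, z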
# %%
from nilearn import surface
from nilearn import datasets
from nilearn import plotting
from nilearn import image
from nilearn import regions
from nilearn.surface import load_surf_mesh
import os
import pylab as plt
import numpy as np
from matplotlib import colors as clr
import matplotlib.patches as mpatches
import matplotlib
matplotlib.__version__
# import mayavi
# np.load('/home/ubuntu/hcp_data/jpg_256/split_videos_256x256/7T_MOVIE4_HO2_v2_256x256/7T_MOVIE4_HO2_v2_256x256_seg_0_72.npy').shape
# %%
def get_faces(faces, parc_idx):
    '''Returns a boolean array indicating whether each face lies on the outer edge of the parcellation defined by the indices in parc_idx
IN:
faces - numpy ndarray of shape (n, 3), containing indices of the mesh faces
parc_idx - indices of the vertices belonging to the region that is to be plotted
'''
faces_in_parc = np.array([np.isin(face, parc_idx) for face in faces])
vertices_on_edge = np.intersect1d(np.unique(faces[faces_in_parc.sum(axis=1)==2]), parc_idx)
faces_outside_edge = np.array([np.isin(face, vertices_on_edge) for face in faces]).sum(axis=1)
faces_outside_edge = np.logical_and(faces_outside_edge > 0, faces_in_parc.sum(axis=1)<3)
return faces_outside_edge
def modify_facecolors(new_color, faces_to_modify, axes):
'''Modifies colors of mesh in axes by replacing all faces in faces_to_modify with new_color'''
if isinstance(new_color, str):
        new_color = np.array(clr.to_rgb(new_color)+(1.,))
poly = axes.collections[0]
# fcolors = poly._facecolor
fcolors = poly._facecolors3d
# _facecolor
    fcolors[faces_to_modify] = np.array(new_color)
poly._facecolors3d = fcolors
return axes
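# Usage outline (sketch only): get_faces and modify_facecolors are meant to be
# chained to outline a parcellation region on an already plotted surface.
# `axes` (from plot_surf_stat_map), `faces` (from load_surf_mesh) and `parc_idx`
# are assumed to come from the cells below; this helper is not called here.
def outline_region(axes, faces, parc_idx, color='black'):
    faces_outside = get_faces(faces, parc_idx)
    return modify_facecolors(color, faces_outside, axes)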
# %%
# path parameters and fsaverage
main_dir = '/home/ubuntu'
result_dir = os.path.join(main_dir, 'results', 'cross_subject')
fsaverage = datasets.fetch_surf_fsaverage(mesh='fsaverage5')
# /home/ubuntu/tk_trial/LSTM/results/cross_subject/groupbeta_0.nii.gz
# group_fname = os.path.join(result_dir, "groupbeta_0.nii.gz")
group_fname = '/home/ubuntu/tk_trial/LSTM/results/cross_subject/groupbeta_0.nii.gz'
group = image.load_img(group_fname)
group0_surf_R = surface.vol_to_surf(group, fsaverage.pial_right)
group0_surf_L = surface.vol_to_surf(group, fsaverage.pial_left)
group_fname = '/home/ubuntu/tk_trial/LSTM/results/cross_subject/groupbeta_1.nii.gz'
group = image.load_img(group_fname)
group1_surf_R = surface.vol_to_surf(group, fsaverage.pial_right)
group1_surf_L = surface.vol_to_surf(group, fsaverage.pial_left)
group_fname = '/home/ubuntu/tk_trial/LSTM/results/cross_subject/groupbeta_2.nii.gz'
group = image.load_img(group_fname)
group2_surf_R = surface.vol_to_surf(group, fsaverage.pial_right)
group2_surf_L = surface.vol_to_surf(group, fsaverage.pial_left)
group_fname = '/home/ubuntu/tk_trial/LSTM/results/cross_subject/groupbeta_3.nii.gz'
group = image.load_img(group_fname)
group3_surf_R = surface.vol_to_surf(group, fsaverage.pial_right)
group3_surf_L = surface.vol_to_surf(group, fsaverage.pial_left)
# %%
# outline
# TPJ outline _____________________________________________________________________________________
action_uniformity_fname = '/home/ubuntu/feature-encoding/viz/brain_plot/action_uniformity-test_z_FDR_0.01.nii.gz'
neurosynth_thres = 0.
thres = 1.
fig, axes = plt.subplots(1,1,subplot_kw={'projection':'3d'}, figsize=(9, 6))
plotting.plot_surf_stat_map(fsaverage.infl_right, group0_surf_R, hemi='right',
title='Surface right hemisphere', colorbar=True,
threshold=1., bg_map=fsaverage.sulc_right,
figure = fig, axes=axes)
plotting.show()
# c = plotting.plot_surf(fsaverage.infl_right,empathy_rev_surf_R, hemi='right',
# title='Surface right hemisphere',
# bg_map=fsaverage.sulc_right, alpha = alpha_thres, threshold = thres,
# cmap = 'BuPu_r', figure = fig, axes=axes, avg_method = 'median')
action_ns = image.threshold_img(action_uniformity_fname,
threshold=neurosynth_thres,
copy=False)
texture = surface.vol_to_surf(action_ns, fsaverage.pial_right)
# plotting.plot_surf_stat_map(fsaverage.infl_right, texture, hemi='right',
# title='Surface right hemisphere', colorbar=False,
# bg_map=fsaverage.sulc_right, alpha = 0., threshold=thres,
# figure = fig, axes=axes)
# https://mjboos.github.io/Nilearn-surface-contours/
# coords, faces = surface.load_surf_mesh(fsaverage.infl_right)
# load vertex coordinates and face indices that specify the surface mesh
coords, faces = load_surf_mesh(fsaverage.infl_right)
destrieux_atlas = datasets.fetch_atlas_surf_destrieux()
parcellation = destrieux_atlas['map_right']
# these are the regions we want to outline
regions = [b'G_pariet_inf-Angular',
b'G_precentral', b'G_postcentral']
regions_idx = [np.where(np.array(destrieux_atlas['labels']) == region)[0]
               for region in regions]
import pandas as pd
import numpy as np
import datetime
from sklearn.utils.class_weight import compute_sample_weight
from termcolor import colored
from tqdm import tqdm
np.random.seed(1)
DISTANCE=60
TIMESTEP_SIZE=60
BATCH_SIZE=256
TEST_RATIO=0.25
PRINT_DATA_EACH=50
TEST_EACH=PRINT_DATA_EACH*10
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--test', action='store_true')
args = parser.parse_args()
df=pd.read_csv("bitstampUSD_1-min_data_2012-01-01_to_2019-03-13.csv")
split_date="01/01/2017"
split_timestamp=datetime.datetime.strptime(split_date, "%d/%m/%Y").timestamp()
data=[]
c=0
last_d=None
for d in df.values:
    if d[0]>=split_timestamp:
        if np.isnan(d[4]):
            # forward-fill a missing close price with the last valid row
            data.append(last_d[4])
        else:
            last_d=d
            data.append(d[4])
split_index=int(len(data)*TEST_RATIO)
train_data=np.array(data[:-split_index])
test_data=np.array(data[-split_index:])
def get_x(data,start_index):
sampled_x=np.expand_dims(data[start_index:start_index+TIMESTEP_SIZE],axis=1)
sampled_x=(sampled_x/(sampled_x[0]+1e-8))-1 #the normalization step
return sampled_x
def get_y(data,start_index):
seq=data[start_index+TIMESTEP_SIZE:start_index+TIMESTEP_SIZE+DISTANCE]
st_p=seq[0]
y_ = np.mean(seq) / st_p
return int(y_>1)
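# Small sanity-check sketch of one training example: get_x rescales a
# TIMESTEP_SIZE-long close-price window relative to its first price, and get_y
# labels it 1 when the mean of the DISTANCE prices that follow the window
# exceeds the first of those future prices. Wrapped in a function so it does
# not run during training.
def _inspect_sample(data, start_index=0):
    x = get_x(data, start_index) # shape (TIMESTEP_SIZE, 1), first entry ~0
    y = get_y(data, start_index) # 1 if the upcoming mean price rises, else 0
    return x.shape, y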
ys_train_data_is={0:[],1:[]}
for i in tqdm(range(len(train_data)-TIMESTEP_SIZE-DISTANCE),desc='ys_train_data_is'):
y_=get_y(train_data,i)
ys_train_data_is[y_].append(i)
total_=sum(map(len,ys_train_data_is.values()))
print(" , ".join(["{}:{:.5%}".format(key,len(value)/total_) for key,value in ys_train_data_is.items()]))
def generator(data,BATCH_SIZE):
while True:
start_index_0=np.random.choice(ys_train_data_is[0],size=BATCH_SIZE//2)
start_index_1=np.random.choice(ys_train_data_is[1],size=BATCH_SIZE//2)
x=[]
y=[]
for start_index in np.append(start_index_0,start_index_1):
sampled_y=get_y(data,start_index)
sampled_x=get_x(data,start_index)
x.append(sampled_x)
y.append(sampled_y)
yield np.array(x),np.array(y)
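# Quick check sketch for the balanced generator above: every yielded batch holds
# BATCH_SIZE windows, half drawn from label-0 start indices and half from
# label-1 ones, so the mean label is 0.5 by construction. Kept inside a function
# so it is not executed as part of the training script.
def _check_one_batch():
    x, y = next(generator(train_data, BATCH_SIZE))
    return x.shape, y.mean() # ((BATCH_SIZE, TIMESTEP_SIZE, 1), 0.5)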
def get_sample_weight(y):
return compute_sample_weight("balanced",y)
from keras import backend as K
###################################################
#This code is to limit the amount of gpu memory that the model use for training
config = K.tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.3
sess = K.tf.Session(config=config)
K.set_session(sess)
###################################################
from keras.models import Model,load_model
from keras import layers as kl
from keras import optimizers
def average_pred(y_true,y_pred):
return K.mean(y_pred)
li=kl.Input(shape=(TIMESTEP_SIZE,1))
l=li
for n_units in [16,32,64,128]:
l=kl.Conv1D(n_units,3,activation='elu',padding='same')(l)
l=kl.MaxPooling1D()(l)
l=kl.Flatten()(l)
l=kl.Dense(64,activation='elu')(l)
l=kl.Dense(1,activation='sigmoid')(l)
model=Model(li,l)
model.compile(optimizers.Adamax(lr=0.002),"binary_crossentropy",['acc',average_pred])
if not args.test:
from keras.callbacks import TensorBoard
import os
iteration=0
res=[]
print_msg="iteration: {} : loss: {:.6f}, acc: {:.4%}, avg_pred: {:.4f}, avg_y: {:.4f}, left_iter_to_test: {}"
best_score=np.inf
for filename in os.listdir("./logs"):
os.remove("./logs/{}".format(filename))
tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,batch_size=BATCH_SIZE,
write_graph=True, write_images=False)
tensorboard.set_model(model)
for x,y in generator(train_data,BATCH_SIZE):
r=model.train_on_batch(x,y)
tensorboard.on_epoch_end(iteration,{'train_loss':r[0],'train_acc':r[1]})
r+=[np.mean(y)]
res.append(r)
iteration+=1
if iteration%PRINT_DATA_EACH==0:
print(print_msg.format(iteration,*np.mean(res,axis=0), TEST_EACH-((iteration-1)%TEST_EACH)))
res=[]
if iteration%(TEST_EACH)==0:
true=[]
test=[]
for i in tqdm(range(len(test_data)-TIMESTEP_SIZE-DISTANCE),desc="testing"):
test.append(get_x(test_data,i))
true.append(get_y(test_data,i))
true,test=np.array(true),np.array(test)
pred=model.evaluate(test,true,sample_weight=get_sample_weight(true),verbose=1,batch_size=BATCH_SIZE)
msg=''
if best_score>pred[0]:
best_score=pred[0]
model.save("model.h5")
msg+=", FOUND A NEW SCORE"
tensorboard.on_epoch_end(iteration,{'test_loss':pred[0],'test_acc':pred[1]})
print(colored("res: {}{}".format( | np.array(pred) | numpy.array |
from autodiff import variable
from autodiff.utils import get_vars
import numpy as np
import math
class Function():
def __init__(self, dim:int, *args):
self.func = [None] * dim
self.dim = dim
vars = []
for v in args:
vars.append(v)
self.vars = vars
def eval(self, **kwargs):
"""
evaluate at given environment
param:
kwargs : variable-values should be given, default 1
return:
value-vector (1d numpy array)
"""
res = np.zeros(self.dim)
env = {}
for var in self.vars:
env[var] = kwargs.get(var, 1)
for i in range(0, self.dim):
res[i] = self.func[i].eval(**env)
return res
def autodiff(self, var:str, **kwargs):
"""
        calculates the gradient for var at the given environment
param:
var : variable-name to calculate derivative for, e.g. "x"
kwargs : variable-values should be given, default 1
return:
            gradient vector (1d numpy array)
"""
res = np.zeros(self.dim)
env = {}
        for v in self.vars: # use a separate loop name so the requested `var` is not shadowed
            env[v] = kwargs.get(v, 1)
for i in range(0, self.dim):
res[i] = self.func[i].autodiff(var, **env)
return res
def jacobian(self, **kwargs):
"""
calculates Jacobian-Matrix at given env
param:
kwargs : variable-values should be given, default 1
return:
jacobian-matrix (2d numpy array)
"""
env = {}
for var in self.vars:
env[var] = kwargs.get(var, 1)
jac = np.zeros((self.dim, len(self.vars)), dtype="float32")
for i in range(0, self.dim):
for j in range(0, len(self.vars)):
jac[i,j] = self.func[i].autodiff(self.vars[j], **env)
return jac
def variance(self, covmat, pos):
"""
        propagates the input covariance matrix through the function at the
        given position (linear error propagation)
param:
covmat : covariance-matrix for input variables as numpy array
pos : space-vector (in order of variables, given as 1d numpy array)
return:
            covariance matrix of the outputs (2d numpy array)
"""
env = {}
for i in range(0, len(self.vars)):
env[self.vars[i]] = pos[i]
jac = self.jacobian(**env)
return np.matmul(np.matmul(jac, covmat), np.transpose(jac))
def covar_from_val(self, values):
"""
        calculates the output covariance matrix from sampled input values
param:
value : 2d numpy array, rows correspond to one dataset, columns in order of variables
return:
            covariance matrix (2d numpy array)
"""
cov = cross_covariance(values)
env = {}
m = np.sum(values, axis=0) / values.shape[0]
for i in range(0, len(self.vars)):
env[self.vars[i]] = m[i]
jac = self.jacobian(**env)
return np.matmul(np.matmul(jac, cov), np.transpose(jac))
def vars(self):
return self.vars
def __getitem__(self, key):
if key < 0 or key > self.dim:
raise ValueError("index out of range")
return self.func[key]
def __setitem__(self, key, value):
if key < 0 or key > self.dim:
raise ValueError("index out of range")
for var in get_vars(value):
if var not in self.vars:
raise ValueError("variale mismatch")
self.func[key] = value
def __str__(self):
s = "[ " + self.func[0].__str__() + "\n"
for i in range(1, self.dim-1):
s += " " + self.func[i].__str__() + "\n"
s += " " + self.func[self.dim-1].__str__() + " ]"
return s
def cross_covariance(valuematrix):
"""
calculates cross covariance matrix from value matrix
param:
valuematrix: rows correspond to datasets, columns to variables
returns:
covariance matrix as numpy array
"""
s = valuematrix.shape
if len(s) > 2 or len(s) < 0:
raise ValueError("wrong value_matrix format")
m = np.sum(valuematrix, axis=0) / s[0]
v = valuematrix - m
    cov = np.matmul(np.transpose(v), v) / (s[0] - 1) # unbiased sample covariance
    return cov
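# Minimal numpy sketch (separate from the class above) of the linear error
# propagation that Function.variance implements: the output covariance near
# `pos` is J @ covmat @ J.T with J the Jacobian at pos. The 2x2 Jacobian below
# is written out by hand for the assumed toy map f(x, y) = (x*y, x + y).
def _propagation_sketch():
    pos = np.array([2.0, 3.0])
    covmat = np.diag([0.1 ** 2, 0.2 ** 2]) # independent input uncertainties
    jac = np.array([[pos[1], pos[0]], # d(x*y)/dx, d(x*y)/dy
                    [1.0, 1.0]]) # d(x+y)/dx, d(x+y)/dy
    return np.matmul(np.matmul(jac, covmat), np.transpose(jac))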
import unittest
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from clusterz.algs.kzmedian import (
DistributedKZMedian, BELDistributedKMedian, k_median_my, kz_median, KZMedian, KMedianWrapped
)
class MyTestCase(unittest.TestCase):
def setUp(self):
cluster = np.random.uniform(-1, 1, size=(40, 2))
self.centers_ = np.array([
[0, 30], [0, -30]
])
self.outliers_ = np.array([
[80, 0], [-80, 0]
])
# data set on a single machine
self.X_without_outliers_ = np.vstack(
[self.centers_,
# clusters
cluster + self.centers_[0] + np.array([5, 0]),
cluster + self.centers_[0] + np.array([-5, 0]),
cluster + self.centers_[1] + np.array([5, 0]),
cluster + self.centers_[1] + np.array([-5, 0])])
self.X_with_outliers_ = np.vstack(
[self.centers_,
self.outliers_,
# clusters
cluster + self.centers_[0] + np.array([5, 0]),
cluster + self.centers_[0] + np.array([-5, 0]),
cluster + self.centers_[1] + np.array([5, 0]),
cluster + self.centers_[1] + np.array([-5, 0])])
self.random_weight_without_outliers_ = np.hstack(
[np.ones(2) * 2,
np.random.uniform(1, 2, len(cluster) * 4)])
self.random_weight_ = np.hstack(
[np.ones(2),
np.ones(2),
np.random.uniform(1, 2, len(cluster) * 4)])
self.uniform_weight_ = np.ones(len(self.X_with_outliers_))
# data on 2 machines
self.Xs_without_outliers_ = [
np.vstack(
[self.centers_[0],
# clusters
cluster + self.centers_[0] + np.array([5, 0]),
cluster + self.centers_[1] + np.array([-5, 0])]),
np.vstack(
[self.centers_[1],
# clusters
                 cluster + self.centers_[0] + np.array([-5, 0]),
                 cluster + self.centers_[1] + np.array([5, 0])])]
from __future__ import division
import numpy as np
from numpy import pi, sqrt, exp, power, log, log10
import os
import constants as ct
import particle as pt
import tools as tl
##############################
# Preparing SKA configurations
##############################
def initialize():
"""This routine is supposed to be run only once, \
i.e. when the module is loaded, therefore\
the I/O is not optimized for speed concerns.
"""
SKA_conf = {}
# # --------------
for exper in ['low', 'mid']:
# if exper == "low":
# path = local_path + "/data/SKA1-low_accumu.csv"
# elif exper == "mid":
# path = local_path + "/data/SKA1-mid_accumu.csv"
# data_raw = np.loadtxt(path, delimiter=',')
# radius = data_raw[:, 0]
# fraction = data_raw[:, 1]
# bins_radius = np.logspace(1, 5, 20) # bin it
# hist_radius = np.interp(np.log10(bins_radius), np.log10(
# radius), fraction, left=0) # sample at the bin edges
# if exper == "low":
# # compute the x-y coordinates of all units
# x_arr, y_arr = get_telescope_coordinate(
# fraction*ct._SKALow_number_of_stations_, radius, SKA=exper)
# # save it
# SKA_conf['low radius'] = (data_raw, x_arr, y_arr, bins_radius,
# hist_radius)
# elif exper == "mid":
# x_arr, y_arr = get_telescope_coordinate(
# fraction*ct._SKA1Mid_number_of_dishes_, radius, SKA=exper)
# SKA_conf['mid radius'] = (data_raw, x_arr, y_arr, bins_radius,
# hist_radius)
# get coordinates
if exper == "low":
SKA_conf['low0'] = np.loadtxt(
local_path + "/data/SKA1_config_low0.csv", delimiter=',')
SKA_conf['low1'] = np.loadtxt(
local_path + "/data/SKA1_config_low1.csv", delimiter=',')
SKA_conf['low2'] = np.loadtxt(
local_path + "/data/SKA1_config_low2_6clusters.csv", delimiter=',')
# update clusters, it's 6 stations per cluster
new_arr = []
for xy in (SKA_conf['low2']):
for j in range(2):
for k in range(3):
x = xy[0] + j*50
y = xy[1] + (k-1)*50
new_arr.append([x, y])
new_arr = np.array(new_arr)
SKA_conf['low2'] = new_arr
# combine them
SKA_conf['low_coord'] = np.concatenate(
(SKA_conf['low0'], SKA_conf['low1'], SKA_conf['low2']))
x_arr = SKA_conf['low_coord'][:, 0]
y_arr = SKA_conf['low_coord'][:, 1]
elif exper == "mid":
SKA_conf['mid0_MeerKAT'] = np.loadtxt(
local_path + "/data/SKA1_config_mid0_MK.csv", delimiter=',')
SKA_conf['mid0_SKA'] = np.loadtxt(
local_path + "/data/SKA1_config_mid0_SKA.csv", delimiter=',')
SKA_conf['mid1_MeerKAT'] = np.loadtxt(
local_path + "/data/SKA1_config_mid1_MK.csv", delimiter=',')
SKA_conf['mid1_SKA'] = np.loadtxt(
local_path + "/data/SKA1_config_mid1_SKA.csv", delimiter=',')
SKA_conf['mid2_SKA'] = np.loadtxt(
local_path + "/data/SKA1_config_mid2_SKA.csv", delimiter=',')
# combine them
SKA_conf['mid_coord'] = np.concatenate(
(SKA_conf['mid0_MeerKAT'],
SKA_conf['mid0_SKA'],
SKA_conf['mid1_MeerKAT'],
SKA_conf['mid1_SKA'],
SKA_conf['mid2_SKA']))
# convert km to m
SKA_conf['mid_coord'][:, 0] = SKA_conf['mid_coord'][:, 0]*1.e3
SKA_conf['mid_coord'][:, 1] = SKA_conf['mid_coord'][:, 1]*1.e3
x_arr = SKA_conf['mid_coord'][:, 0]
y_arr = SKA_conf['mid_coord'][:, 1]
# get baseline distribution
baseline_arr = get_baseline(x_arr, y_arr)
hist_baseline, bins_baseline = np.histogram(
baseline_arr, bins=np.logspace(1, 5, 20000))
# correcting the over-counting of baseline pair
hist_baseline = hist_baseline/2.
hist_baseline_cumsum = np.cumsum(hist_baseline)
# save it
if exper == "low":
SKA_conf['low baseline'] = (
baseline_arr, hist_baseline, bins_baseline, hist_baseline_cumsum)
elif exper == "mid":
SKA_conf['mid baseline'] = (
baseline_arr, hist_baseline, bins_baseline, hist_baseline_cumsum)
# about effective area
if exper == "low":
path = local_path + "/data/SKA1-low_Aeff_over_Tsys.txt"
data_raw = np.loadtxt(path)
# low is given in MHz, convert to GHz
data_raw[:, 0] = data_raw[:, 0] * 1.e-3
SKA_conf['low A/T'] = data_raw
elif exper == "mid":
path = local_path + "/data/SKA1-mid_Aeff_over_Tsys.txt"
data_raw = np.loadtxt(path)
SKA_conf['mid A/T'] = data_raw
SKA_conf['A/T'] = np.concatenate((SKA_conf['low A/T'],
SKA_conf['mid A/T']))
# computing efficiency
# make a nu grid
Nsteps = 2001
nulow = np.logspace(log10(ct._nu_min_ska_low_), log10(
ct._nu_max_ska_low_), Nsteps//2)[1:]
# ... and SKA mid...
numid = np.logspace(log10(ct._nu_min_ska_mid_), log10(
ct._nu_max_ska_mid_), Nsteps - Nsteps//2)[1:]
Aeff_over_Tsys = SKA_conf['A/T']
# Mid
nu_arr = numid
Aeff_over_Tsys_arr = np.interp(
nu_arr, Aeff_over_Tsys[:, 0], Aeff_over_Tsys[:, 2])
Tsys_arr = T_sys_mid(nu_arr)
eta_arr = Aeff_over_Tsys_arr * Tsys_arr / ct._area_ska_mid_
SKA_conf['eta mid'] = (nu_arr, eta_arr)
# Low
nu_arr = nulow
Aeff_over_Tsys_arr = np.interp(
nu_arr, Aeff_over_Tsys[:, 0], Aeff_over_Tsys[:, 2])
Tsys_arr = T_sys_low(nu_arr)
eta_arr = Aeff_over_Tsys_arr * Tsys_arr / ct._area_ska_low_
SKA_conf['eta low'] = (nu_arr, eta_arr)
# combined storage
nu_arr = np.concatenate((SKA_conf['eta low'][0], SKA_conf['eta mid'][0]))
eta_arr = np.concatenate((SKA_conf['eta low'][1], SKA_conf['eta mid'][1]))
SKA_conf['eta'] = (nu_arr, eta_arr)
return SKA_conf
################
# SKA properties
################
def SKA_get_active_baseline(length, exper_mode):
"""Get the active number of baselines in the interferometry mode
:param length: critical baseline below which the signal can be resolved
:param exper_mode: "SKA low" or "SKA mid"
:returns: number of baselines that sees the signal
"""
length_arr, is_scalar = tl.treat_as_arr(length)
if exper_mode == "SKA low":
(baseline_arr, hist_baseline, bins_baseline,
hist_baseline_cumsum) = SKA_conf['low baseline']
if exper_mode == "SKA mid":
(baseline_arr, hist_baseline, bins_baseline,
hist_baseline_cumsum) = SKA_conf['mid baseline']
res = np.interp(np.log(length_arr), np.log(bins_baseline[:-1]),
hist_baseline_cumsum, left=ct._zero_)
if exper_mode == "SKA low":
res[length_arr < ct._SKALow_station_diameter_] = ct._zero_
if exper_mode == "SKA mid":
res[length_arr < ct._SKA1Mid_dish_diameter_] = ct._zero_
if is_scalar:
res = np.squeeze(res)
return res
def SKA_exper_nu(nu):
"""
Returns the SKA experiment mode (low/mid) sensitive to the given frequency nu [GHz].
Parameters
----------
nu : frequency [GHz]
"""
if (nu < ct._nu_min_ska_low_): # frequency below SKA low lower threshold
exper_mode = None # just a placeholder, won't matter
elif (nu <= ct._nu_max_ska_low_): # frequency within SKA low range
exper_mode = 'SKA low'
elif (nu <= ct._nu_max_ska_mid_): # frequency within SKA mid range
exper_mode = 'SKA mid'
else: # frequency above SKA mid upper threshold
exper_mode = None # just a placeholder, won't matter
return exper_mode
def SKA_specs(nu, exper_mode, correlation_mode=None, theta_sig=None):
"""
Returns the SKA specifications for the given experiment mode and frequency [GHz]:
area [m^2],
window,
receiver noise brightness temperature [K],
efficiency,
solid angle resolution [sr],
number_of_dishes, and
number_of_measurements.
Parameters
----------
nu : frequency [GHz]
exper_mode : mode in which the experiment is working
correlation_mode: whether to run in interferometry mode or single dish mode. Default None is meant to raise error if not assigned explicitly.
theta_sig: the signal size we want to observe [radian]
"""
if exper_mode == None:
area, window, Tr, eta, Omega_res, number_of_dishes, number_of_measurements = 0., 0., 0., 0., 1.e-100, 0., 0. # set to zero so it will raise error if not treated
elif exper_mode == 'SKA low' and correlation_mode == "single dish":
area = ct._area_ska_low_
window = np.heaviside(nu - ct._nu_min_ska_low_, 1.) * \
np.heaviside(ct._nu_max_ska_low_ - nu, 1.)
# Tr = ct._Tr_ska_low_ # DEPRECATED
Tr = Trec_low(nu)
eta = eta_nu(nu)
# finding resolution:
wavelength = pt.lambda_from_nu(nu)/100. # wavelength [m]
# angular size of pixel resolution [rad]
# assuming this is the aperture angle and not the radial angle
theta_res = (1.22*wavelength) / \
ct._SKALow_station_diameter_ # /sqrt(eta)
Omega_res = ct.angle_to_solid_angle(
theta_res) # solid angle of resolution [sr]
number_of_dishes = ct._SKALow_number_of_stations_
number_of_measurements = number_of_dishes
# Omega_max = np.inf # being sloppy here but we never reach FOV
elif exper_mode == 'SKA low' and correlation_mode == "interferometry":
window = np.heaviside(nu - ct._nu_min_ska_low_, 1.) * \
np.heaviside(ct._nu_max_ska_low_ - nu, 1.)
# Tr = ct._Tr_ska_low_ # DEPRECATED
Tr = Trec_low(nu)
eta = eta_nu(nu)
# get the required baseline length for nu
wavelength = pt.lambda_from_nu(nu) / 100. # wavelength [m]
critical_baseline_length = (
1.22*wavelength) / (theta_sig)\
* ct._SKA_factor_lose_signal_ # fudge factor for when invisible
# get the active number of baselines
active_number_of_baselines = SKA_get_active_baseline(
critical_baseline_length, exper_mode='SKA low')
# taking the resolution to be exactly the signal size
# penalty is taken care of through active_number_of_baselines
theta_res = theta_sig
Omega_res = ct.angle_to_solid_angle(
theta_res) # solid angle of resolution [sr]
# for interferometry mode noise has 1/sqrt(number of active baselines)
number_of_measurements = active_number_of_baselines
# NOTE: N.B.: this reception area is the total area, and is correct only assuming all dishes/stations contribute
# which is NOT true for large signal angular size. The code needs to be updated to include the fact that
# only active dishes/stations/telescopes are contributing. Thus, for large signal angular sizes,
# the individual values of the S and N CANNOT BE TRUSTED.
# However, since S and N scale the same with reception area, S/N cancels out
# in the end only the number of measurements (baselines) matter.
# Therefore, our S/N CAN INDEED be trusted.
area = ct._area_ska_low_
number_of_dishes = ct._SKALow_number_of_stations_
elif exper_mode == 'SKA mid' and correlation_mode == "single dish":
area = ct._area_ska_mid_
window = np.heaviside(nu - ct._nu_min_ska_mid_, 0.) * \
np.heaviside(ct._nu_max_ska_mid_ - nu, 1.)
# Tr = ct._Tr_ska_mid_ # DEPRECATED, AND INCONSISTENT
Tr = Trec_mid(nu)
eta = eta_nu(nu)
# finding resolution:
wavelength = pt.lambda_from_nu(nu)/100. # wavelength [m]
# angular size of pixel resolution [rad]
# assuming this is the aperture angle and not the radial angle
# theta_res = (1.22*wavelength)/sqrt(eta*4.*area/pi)
theta_res = (1.22*wavelength)/ct._SKA1Mid_dish_diameter_ # /sqrt(eta)
Omega_res = ct.angle_to_solid_angle(
theta_res) # solid angle of resolution [sr]
number_of_dishes = ct._SKA1Mid_number_of_dishes_
number_of_measurements = number_of_dishes
# Omega_max = np.inf # being sloppy here but we never reach FOV
elif exper_mode == 'SKA mid' and correlation_mode == "interferometry":
area = ct._area_ska_mid_
window = np.heaviside(nu - ct._nu_min_ska_mid_, 0.) * \
np.heaviside(ct._nu_max_ska_mid_ - nu, 1.)
# Tr = ct._Tr_ska_mid_ # DEPRECATED, AND INCONSISTENT
Tr = Trec_mid(nu)
eta = eta_nu(nu)
# get the required baseline length for nu
wavelength = pt.lambda_from_nu(nu) / 100. # wavelength [m]
critical_baseline_length = (
1.22*wavelength) / (theta_sig)\
* ct._SKA_factor_lose_signal_ # fudge factor
# get the active number of baselines
active_number_of_baselines = SKA_get_active_baseline(
critical_baseline_length, exper_mode='SKA mid')
# taking the resolution to be exactly the signal size
# penalty is taken care of through active_num_of_baselines
theta_res = theta_sig
Omega_res = ct.angle_to_solid_angle(
theta_res) # solid angle of resolution [sr]
# for interferometry mode noise has 1/sqrt(number of active baselines)
number_of_measurements = active_number_of_baselines
# NOTE: N.B.: this reception area is the total area, and is correct only assuming all dishes/stations contribute
# which is NOT true for large signal angular size. The code needs to be updated to include the fact that
# only active dishes/stations/telescopes are contributing. Thus, for large signal angular sizes,
# the individual values of the S and N CANNOT BE TRUSTED.
# However, since S and N scale the same with reception area, S/N cancels out
# in the end only the number of measurements (baselines) matter.
# Therefore, our S/N CAN INDEED be trusted.
area = ct._area_ska_mid_
number_of_dishes = ct._SKA1Mid_number_of_dishes_
# in case the number of baselines is zero
if number_of_measurements == 0:
number_of_measurements = 1e-100
return area, window, Tr, eta, Omega_res, number_of_dishes, number_of_measurements
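# Standalone sketch of the diffraction-limit estimate used repeatedly above:
# the single-dish resolution is theta ~ 1.22 * lambda / D, converted here to the
# solid angle of a cone with that apex angle (one common convention; the module
# itself delegates this step to ct.angle_to_solid_angle).
def _resolution_sketch(nu_ghz, dish_diameter_m):
    wavelength_m = 2.998e8 / (nu_ghz * 1.e9) # c / nu
    theta_res = 1.22 * wavelength_m / dish_diameter_m # [rad]
    omega_res = 2. * np.pi * (1. - np.cos(theta_res / 2.)) # [sr]
    return theta_res, omega_res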
# def get_telescope_coordinate(tel_arr, r_arr, SKA):
# """Generate an array with coordinate of each telescope computed
# :param tele_arr: the array of telescope index from 1 to (number of telescope)
# :param radius_arr: the radius of each telescope
# :param SKA: "low" or "mid"
# """
# if SKA == "low":
# tel_fine_arr = np.arange(ct._SKALow_number_of_stations_)
# r_core = ct._SKALow_r_core_
# elif SKA == "mid":
# tel_fine_arr = np.arange(ct._SKA1Mid_number_of_dishes_)
# r_core = ct._SKA1Mid_r_core_
# r_fine_arr = np.interp(tel_fine_arr, tel_arr, r_arr)
# # fix seed as we don't really need the randomness
# np.random.seed(123)
# theta_arr = np.random.random(size=len(r_fine_arr)) * np.pi * 2.
# # over write the arm part
# # mask = np.where(r_fine_arr > r_core, True, False)
# for i in tel_fine_arr:
# if r_fine_arr[int(i)] > r_core:
# theta_arr[int(i)] = int(i) % 3 * 2. * np.pi / 3.
# x_arr = r_fine_arr * np.cos(theta_arr)
# y_arr = r_fine_arr * np.sin(theta_arr)
# return x_arr, y_arr
def get_baseline(x_arr, y_arr):
"""Given array coordinates x, y, compute lengths of each pair. Returns the array of pair lengths.
:param x_arr: x coordinate of all units
:param y_arr: y coordinates of all units
"""
n_unit = len(x_arr)
# n_baseline = int(n_unit * (n_unit - 1) / 2.)
baseline_arr = np.zeros((n_unit, n_unit))
for i in range(n_unit):
for j in range(n_unit):
# print("x[i]=%s, y[j]=%s" % (x_arr[i], y_arr[j]))
dist = np.sqrt((x_arr[i] - x_arr[j])**2 + (y_arr[i] - y_arr[j])**2)
baseline_arr[i, j] = dist
# baseline_arr[j, i] = dist
baseline_arr = baseline_arr.reshape(-1)
baseline_arr = baseline_arr[baseline_arr > 0]
return baseline_arr
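# Vectorized sketch equivalent to get_baseline above: broadcasting replaces the
# double Python loop and returns the same flattened array of nonzero pair
# distances (each unordered pair still appears twice, matching the loop version
# and the later factor-1/2 correction of the histogram).
def get_baseline_vectorized(x_arr, y_arr):
    x_arr = np.asarray(x_arr, dtype=float)
    y_arr = np.asarray(y_arr, dtype=float)
    dx = x_arr[:, None] - x_arr[None, :]
    dy = y_arr[:, None] - y_arr[None, :]
    baseline_arr = np.sqrt(dx ** 2 + dy ** 2).reshape(-1)
    return baseline_arr[baseline_arr > 0]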
def Trec_mid_MeerKAT(nu):
"""Receiver noise temperature [K] of a MeerKAT dish (13.5m-diameter type)
:param nu: frequency [GHz]
"""
nu, is_scalar = tl.treat_as_arr(nu)
res = []
for nui in nu:
if 0.58 < nui < 1.02:
res.append(11 - 4.5*(nui-0.58))
elif 1.02 < nui < 1.67:
res.append(7.5 + 6.8 * np.abs(nui - 1.65)**1.5)
elif 1.65 < nui < 3.05:
res.append(7.5)
else:
res.append(np.inf)
if is_scalar:
res = np.squeeze(res)
return np.array(res)
def Trec_mid_SKA(nu):
"""Receiver noise temperature [K] of a SKA dish (15m-diameter type)
:param nu: frequency [GHz]
"""
nu, is_scalar = tl.treat_as_arr(nu)
res = []
for nui in nu:
if 0.35 < nui < 0.95:
res.append(15 + 30*(nui-0.75)**2)
elif 0.95 < nui < 4.6:
res.append(7.5)
elif 4.6 < nui < 50:
res.append(4.4+0.69 * nui)
else:
res.append(np.inf)
if is_scalar:
res = np.squeeze(res)
return np.array(res)
def Trec_mid(nu):
"""Receiver noise temperature [K] of a typical SKA1-mid dish. Combines MeerKAT with SKA dishes. If there's only SKA dish, use that one; if there are both, use a weighted mean.
:param nu: frequency [GHz]
"""
nu, is_scalar = tl.treat_as_arr(nu)
Trec_arr = []
for nui in nu:
val1 = Trec_mid_MeerKAT(nui)
val2 = Trec_mid_SKA(nui)
if np.isinf(val1):
val1 = val2
# val = np.sqrt(val1*val2) # NOTE: geometric mean puts them on equal footing, even if there was but a single MeerKAT telescope!!!
val = (val1*64. + val2*133.)/(133.+64.) # weighted mean: seems fairer
Trec_arr.append(val)
Trec_arr = np.array(Trec_arr)
if is_scalar:
Trec_arr = np.squeeze(Trec_arr)
return Trec_arr
def Trec_low(nu):
"""Receiver noise temperature [K] of a typical SKA1-low dish.
:param nu: frequency [GHz]
"""
nu, is_scalar = tl.treat_as_arr(nu)
Trec_arr = np.ones_like(nu) * ct._Tr_ska_low_
if is_scalar:
        Trec_arr = np.squeeze(Trec_arr)
    return Trec_arr
from gymenv_v2 import make_multiple_env
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import wandb
wandb.login()
run=wandb.init(project="finalproject", entity="ieor-4575", tags=["test"])
#run=wandb.init(project="finalproject", entity="ieor-4575", tags=["training-hard"])
#run=wandb.init(project="finalproject", entity="ieor-4575", tags=["test"])
### TRAINING
# Setup: You may generate your own instances on which you train the cutting agent.
custom_config = {
"load_dir" : 'instances/randomip_n60_m60', # this is the location of the randomly generated instances (you may specify a different directory)
"idx_list" : list(range(20)), # take the first 20 instances from the directory
"timelimit" : 50, # the maximum horizon length is 50
"reward_type" : 'obj' # DO NOT CHANGE reward_type
}
# Easy Setup: Use the following environment settings. We will evaluate your agent with the same easy config below:
easy_config = {
"load_dir" : 'instances/train_10_n60_m60',
"idx_list" : list(range(10)),
"timelimit" : 50,
"reward_type" : 'obj'
}
# Hard Setup: Use the following environment settings. We will evaluate your agent with the same hard config below:
hard_config = {
"load_dir" : 'instances/train_100_n60_m60',
"idx_list" : list(range(99)),
"timelimit" : 50,
"reward_type" : 'obj'
}
test_config = {
"load_dir" : 'instances/test_100_n60_m60',
"idx_list" : list(range(99)),
"timelimit" : 50,
"reward_type" : 'obj'
}
class LSTM_net(nn.Module):
def __init__(self, input_size, hidden_size, bidirectional=False):
super(LSTM_net, self).__init__()
self.hidden_size = hidden_size
self.input_size = input_size
self.bidirectional = bidirectional
self.lstm = nn.LSTM(input_size, hidden_size,
bidirectional=bidirectional, batch_first=True)
def forward(self, input):
hidden = self.init_hidden()
inputs = torch.FloatTensor(input).view(1, -1, self.input_size)
output, _ = self.lstm(inputs)
        # output[-1] is the same as the last hidden state
output = output[-1].reshape(-1, self.hidden_size)
return output
def init_hidden(self):
return (torch.zeros(1 + int(self.bidirectional), 1, self.hidden_size),
torch.zeros(1 + int(self.bidirectional), 1, self.hidden_size))
class Attention_Net(nn.Module):
def __init__(self, input_size, hidden_size, hidden_size2):
super(Attention_Net, self).__init__()
# constrain and cuts dimension
self.input_size = int(input_size)
self.hidden_size = int(hidden_size)
self.hidden_size2 = int(hidden_size2)
self.lstm1 = LSTM_net(input_size, hidden_size)
self.lstm2 = LSTM_net(input_size, hidden_size)
self.linear1 = nn.Linear(self.hidden_size, self.hidden_size2)
self.linear2 = nn.Linear(self.hidden_size2, self.hidden_size2)
self.tanh = nn.Tanh()
def forward(self, constraints, cuts):
constraints = torch.FloatTensor(constraints)
cuts = torch.FloatTensor(cuts)
# lstm
A_embed = self.lstm1.forward(constraints)
D_embed = self.lstm2.forward(cuts)
# dense
A = self.linear2(self.tanh(self.linear1(A_embed)))
D = self.linear2(self.tanh(self.linear1(D_embed)))
# attention
logits = torch.sum(torch.mm(D, A.T), axis=1)
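        # logits has shape (num_cuts,): one score per candidate cut, obtained by summing
        # the cut-vs-constraint similarity matrix D @ A^T over the constraint dimension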
return logits
# Policy network is adapted from lab4 with small modifications
class Policy(object):
def __init__(self, input_size, hidden_size, hidden_size2, lr):
self.model = Attention_Net(input_size, hidden_size, hidden_size2)
# DEFINE THE OPTIMIZER
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
def compute_prob(self, constraints, cuts):
constraints = torch.FloatTensor(constraints)
cuts = torch.FloatTensor(cuts)
prob = torch.nn.functional.softmax(self.model(constraints, cuts), dim=-1)
return prob.cpu().data.numpy()
def _to_one_hot(self, y, num_classes):
"""
convert an integer vector y into one-hot representation
"""
scatter_dim = len(y.size())
y_tensor = y.view(*y.size(), -1)
zeros = torch.zeros(*y.size(), num_classes, dtype=y.dtype)
return zeros.scatter(scatter_dim, y_tensor, 1)
def train(self, constraints, cuts, actions, Qs):
"""
        constraints: list of constraint feature arrays, one per step
        cuts: list of candidate-cut feature arrays, one per step
        actions: numpy array (indices of the chosen cuts)
        Qs: numpy array (discounted returns / Q values)
"""
actions = torch.LongTensor(actions)
Qs = torch.FloatTensor(Qs)
total_loss = 0
# for a bunch of constraints and cuts, need to go one by one
for i in range(len(constraints)):
curr_constraints = constraints[i]
curr_cuts = cuts[i]
curr_action = actions[i]
# COMPUTE probability vector pi(s) for all s in states
logits = self.model(curr_constraints, curr_cuts)
prob = torch.nn.functional.softmax(logits, dim=-1)
            # Compute probability pi(s,a) for all s,a
action_onehot = self._to_one_hot(curr_action, curr_cuts.shape[0])
prob_selected = torch.sum(prob * action_onehot, axis=-1)
# FOR ROBUSTNESS
prob_selected += 1e-8
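            # REINFORCE surrogate loss: minimize -Q * log pi(a|s) to maximize expected return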
loss = -torch.mean(Qs[i] * torch.log(prob_selected))
# BACKWARD PASS
self.optimizer.zero_grad()
loss.backward()
# UPDATE
self.optimizer.step()
total_loss += loss.detach().cpu().data.numpy()
return total_loss
def discounted_rewards(r, gamma):
""" take 1D float array of rewards and compute discounted reward """
    discounted_r = np.zeros_like(r)
    # accumulate the discounted return backwards through time
    running_add = 0.
    for t in reversed(range(len(r))):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r
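# e.g. discounted_rewards(np.array([1., 0., 1.]), gamma=0.9) -> array([1.81, 0.9, 1.])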
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Class handling the test of any model
#
# ----------------------------------------------------------------------------------------------------------------------
#
# <NAME> - 11/06/2018
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import tensorflow as tf
import numpy as np
from os import makedirs, listdir
from os.path import exists, join
import time
from sklearn.neighbors import KDTree
# PLY reader
from utils.ply import read_ply, write_ply
# Metrics
from utils.metrics import IoU_from_confusions
from sklearn.metrics import confusion_matrix
from tensorflow.python.client import timeline
import json
# ----------------------------------------------------------------------------------------------------------------------
#
# Tester Class
# \******************/
#
class TimeLiner:
def __init__(self):
self._timeline_dict = None
def update_timeline(self, chrome_trace):
        # convert chrome trace to python dict
chrome_trace_dict = json.loads(chrome_trace)
# for first run store full trace
if self._timeline_dict is None:
self._timeline_dict = chrome_trace_dict
# for other - update only time consumption, not definitions
else:
for event in chrome_trace_dict['traceEvents']:
                # events that carry timing information have a 'ts' field
if 'ts' in event:
self._timeline_dict['traceEvents'].append(event)
def save(self, f_name):
with open(f_name, 'w') as f:
json.dump(self._timeline_dict, f)
class ModelTester:
# Initiation methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, model, restore_snap=None):
# Tensorflow Saver definition
my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='KernelPointNetwork')
self.saver = tf.train.Saver(my_vars, max_to_keep=100)
# Create a session for running Ops on the Graph.
on_CPU = False
if on_CPU:
cProto = tf.ConfigProto(device_count={'GPU': 0})
else:
cProto = tf.ConfigProto()
cProto.gpu_options.allow_growth = True
self.sess = tf.Session(config=cProto)
# Init variables
self.sess.run(tf.global_variables_initializer())
# Name of the snapshot to restore to (None if you want to start from beginning)
# restore_snap = join(self.saving_path, 'snapshots/snap-40000')
if (restore_snap is not None):
self.saver.restore(self.sess, restore_snap)
print("Model restored from " + restore_snap)
# Add a softmax operation for predictions
self.prob_logits = tf.nn.softmax(model.logits)
# Test main methods
# ------------------------------------------------------------------------------------------------------------------
def test_classification(self, model, dataset, num_votes=100):
# Initialise iterator with test data
self.sess.run(dataset.test_init_op)
# Number of classes predicted by the model
nc_model = model.config.num_classes
# Initiate votes
average_probs = np.zeros((len(dataset.input_labels['test']), nc_model))
average_counts = np.zeros((len(dataset.input_labels['test']), nc_model))
mean_dt = np.zeros(2)
last_display = time.time()
while np.min(average_counts) < num_votes:
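            # each pass over the test set adds one vote per object; loop until every object has at least num_votes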
# Run model on all test examples
# ******************************
# Initiate result containers
probs = []
targets = []
obj_inds = []
count = 0
while True:
try:
# Run one step of the model
t = [time.time()]
ops = (self.prob_logits, model.labels, model.inputs['object_inds'])
prob, labels, inds = self.sess.run(ops, {model.dropout_prob: 1.0})
t += [time.time()]
# Get probs and labels
probs += [prob]
targets += [labels]
obj_inds += [inds]
count += prob.shape[0]
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Vote {:.0f} : {:.1f}% (timings : {:4.2f} {:4.2f})'
print(message.format(np.min(average_counts),
100 * count / dataset.num_test,
1000 * (mean_dt[0]),
1000 * (mean_dt[1])))
except tf.errors.OutOfRangeError:
break
# Average votes
# *************
# Stack all validation predictions
probs = np.vstack(probs)
targets = np.hstack(targets)
obj_inds = np.hstack(obj_inds)
if np.any(dataset.input_labels['test'][obj_inds] != targets):
raise ValueError('wrong object indices')
# Compute incremental average (predictions are always ordered)
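            # running-mean identity: mean_n = mean_{n-1} + (x_n - mean_{n-1}) / n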
average_counts[obj_inds] += 1
average_probs[obj_inds] += (probs - average_probs[obj_inds]) / (average_counts[obj_inds])
# Save/Display temporary results
# ******************************
test_labels = np.array(dataset.label_values)
# Compute classification results
C1 = confusion_matrix(dataset.input_labels['test'],
np.argmax(average_probs, axis=1),
test_labels)
ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6)
print('Test Accuracy = {:.1f}%'.format(ACC))
s = ''
for cc in C1:
for c in cc:
s += '{:d} '.format(c)
s += '\n'
print(s)
# Initialise iterator with test data
self.sess.run(dataset.test_init_op)
return
def test_segmentation(self, model, dataset, num_votes=100, num_saves=10):
##################
# Pre-computations
##################
print('Preparing test structures')
t1 = time.time()
# Collect original test file names
original_path = join(dataset.path, 'test_ply')
object_name = model.config.dataset.split('_')[1]
test_names = [f[:-4] for f in listdir(original_path) if f[-4:] == '.ply' and object_name in f]
test_names = np.sort(test_names)
original_labels = []
original_points = []
projection_inds = []
for i, cloud_name in enumerate(test_names):
# Read data in ply file
data = read_ply(join(original_path, cloud_name + '.ply'))
points = np.vstack((data['x'], -data['z'], data['y'])).T
original_labels += [data['label'] - 1]
original_points += [points]
# Create tree structure and compute neighbors
tree = KDTree(dataset.test_points[i])
projection_inds += [np.squeeze(tree.query(points, return_distance=False))]
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
##########
# Initiate
##########
# Test saving path
if model.config.saving:
test_path = join('test', model.saving_path.split('/')[-1])
if not exists(test_path):
makedirs(test_path)
else:
test_path = None
# Initialise iterator with test data
self.sess.run(dataset.test_init_op)
# Initiate result containers
average_predictions = [np.zeros((1, 1), dtype=np.float32) for _ in test_names]
#####################
# Network predictions
#####################
mean_dt = np.zeros(2)
last_display = time.time()
for v in range(num_votes):
# Run model on all test examples
# ******************************
# Initiate result containers
all_predictions = []
all_labels = []
all_points = []
all_scales = []
all_rots = []
while True:
try:
# Run one step of the model
t = [time.time()]
ops = (self.prob_logits,
model.labels,
model.inputs['in_batches'],
model.inputs['points'],
model.inputs['augment_scales'],
model.inputs['augment_rotations'])
preds, labels, batches, points, s, R = self.sess.run(ops, {model.dropout_prob: 1.0})
t += [time.time()]
# Stack all predictions for each class separately
max_ind = np.max(batches)
for b_i, b in enumerate(batches):
# Eliminate shadow indices
b = b[b < max_ind - 0.5]
# Get prediction (only for the concerned parts)
predictions = preds[b]
# Stack all results
all_predictions += [predictions]
all_labels += [labels[b]]
all_points += [points[0][b]]
all_scales += [s[b_i]]
all_rots += [R[b_i, :, :]]
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Vote {:d} : {:.1f}% (timings : {:4.2f} {:4.2f})'
print(message.format(v,
100 * len(all_predictions) / len(original_labels),
1000 * (mean_dt[0]),
1000 * (mean_dt[1])))
except tf.errors.OutOfRangeError:
break
# Project predictions on original point clouds
# ********************************************
print('\nGetting test confusions')
t1 = time.time()
proj_predictions = []
Confs = []
for i, cloud_name in enumerate(test_names):
# Interpolate prediction from current positions to original points
proj_predictions += [all_predictions[i][projection_inds[i]]]
# Average prediction across votes
average_predictions[i] = average_predictions[i] + (proj_predictions[i] - average_predictions[i]) / (v + 1)
# Compute confusion matrices
parts = [j for j in range(proj_predictions[i].shape[1])]
Confs += [confusion_matrix(original_labels[i], np.argmax(average_predictions[i], axis=1), parts)]
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
# Save the best/worst segmentations per class
# *******************************************
print('Saving test examples')
t1 = time.time()
# Regroup confusions per object class
Confs = np.stack(Confs)
IoUs = IoU_from_confusions(Confs)
mIoUs = np.mean(IoUs, axis=-1)
# Get X best and worst prediction
order = np.argsort(mIoUs)
worst_inds = order[:num_saves]
best_inds = order[:-num_saves-1:-1]
worst_IoUs = IoUs[order[:num_saves]]
best_IoUs = IoUs[order[:-num_saves-1:-1]]
# Save the names in a file
obj_path = join(test_path, object_name)
if not exists(obj_path):
makedirs(obj_path)
worst_file = join(obj_path, 'worst_inds.txt')
best_file = join(obj_path, 'best_inds.txt')
with open(worst_file, "w") as text_file:
for w_i, w_IoUs in zip(worst_inds, worst_IoUs):
text_file.write('{:d} {:s} :'.format(w_i, test_names[w_i]))
for IoU in w_IoUs:
text_file.write(' {:.1f}'.format(100*IoU))
text_file.write('\n')
with open(best_file, "w") as text_file:
for b_i, b_IoUs in zip(best_inds, best_IoUs):
text_file.write('{:d} {:s} :'.format(b_i, test_names[b_i]))
for IoU in b_IoUs:
text_file.write(' {:.1f}'.format(100*IoU))
text_file.write('\n')
# Save the clouds
for i, w_i in enumerate(worst_inds):
filename = join(obj_path, 'worst_{:02d}.ply'.format(i+1))
preds = np.argmax(average_predictions[w_i], axis=1).astype(np.int32)
write_ply(filename,
[original_points[w_i], original_labels[w_i], preds],
['x', 'y', 'z', 'gt', 'pre'])
for i, b_i in enumerate(best_inds):
filename = join(obj_path, 'best_{:02d}.ply'.format(i+1))
preds = np.argmax(average_predictions[b_i], axis=1).astype(np.int32)
write_ply(filename,
[original_points[b_i], original_labels[b_i], preds],
['x', 'y', 'z', 'gt', 'pre'])
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
# Display results
# ***************
print('Objs | Inst | Air Bag Cap Car Cha Ear Gui Kni Lam Lap Mot Mug Pis Roc Ska Tab')
print('-----|------|--------------------------------------------------------------------------------')
s = '---- | ---- | '
for obj in dataset.label_names:
if obj == object_name:
s += '{:5.2f} '.format(100 * np.mean(mIoUs))
else:
s += '---- '
print(s + '\n')
# Initialise iterator with test data
self.sess.run(dataset.test_init_op)
return
def test_multi_segmentation(self, model, dataset, num_votes=100, num_saves=10):
##################
# Pre-computations
##################
print('Preparing test structures')
t1 = time.time()
# Collect original test file names
original_path = join(dataset.path, 'test_ply')
test_names = [f[:-4] for f in listdir(original_path) if f[-4:] == '.ply']
test_names = np.sort(test_names)
original_labels = []
original_points = []
projection_inds = []
for i, cloud_name in enumerate(test_names):
# Read data in ply file
data = read_ply(join(original_path, cloud_name + '.ply'))
points = np.vstack((data['x'], -data['z'], data['y'])).T
original_labels += [data['label'] - 1]
original_points += [points]
# Create tree structure to compute neighbors
tree = KDTree(dataset.input_points['test'][i])
projection_inds += [np.squeeze(tree.query(points, return_distance=False))]
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
##########
# Initiate
##########
# Test saving path
if model.config.saving:
test_path = join('test', model.saving_path.split('/')[-1])
if not exists(test_path):
makedirs(test_path)
else:
test_path = None
# Initialise iterator with test data
self.sess.run(dataset.test_init_op)
# Initiate result containers
average_predictions = [np.zeros((1, 1), dtype=np.float32) for _ in test_names]
#####################
# Network predictions
#####################
mean_dt = np.zeros(2)
last_display = time.time()
for v in range(num_votes):
# Run model on all test examples
# ******************************
# Initiate result containers
all_predictions = []
all_obj_inds = []
while True:
try:
# Run one step of the model
t = [time.time()]
ops = (self.prob_logits,
model.labels,
model.inputs['super_labels'],
model.inputs['object_inds'],
model.inputs['in_batches'])
preds, labels, obj_labels, o_inds, batches = self.sess.run(ops, {model.dropout_prob: 1.0})
t += [time.time()]
# Stack all predictions for each class separately
max_ind = np.max(batches)
for b_i, b in enumerate(batches):
# Eliminate shadow indices
b = b[b < max_ind - 0.5]
# Get prediction (only for the concerned parts)
obj = obj_labels[b[0]]
predictions = preds[b][:, :model.config.num_classes[obj]]
# Stack all results
all_predictions += [predictions]
all_obj_inds += [o_inds[b_i]]
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Vote {:d} : {:.1f}% (timings : {:4.2f} {:4.2f})'
print(message.format(v,
100 * len(all_predictions) / dataset.num_test,
1000 * (mean_dt[0]),
1000 * (mean_dt[1])))
except tf.errors.OutOfRangeError:
break
# Project predictions on original point clouds
# ********************************************
print('\nGetting test confusions')
t1 = time.time()
for i, probs in enumerate(all_predictions):
# Interpolate prediction from current positions to original points
obj_i = all_obj_inds[i]
proj_predictions = probs[projection_inds[obj_i]]
# Average prediction across votes
average_predictions[obj_i] = average_predictions[obj_i] + \
(proj_predictions - average_predictions[obj_i]) / (v + 1)
Confs = []
for obj_i, avg_probs in enumerate(average_predictions):
# Compute confusion matrices
parts = [j for j in range(avg_probs.shape[1])]
Confs += [confusion_matrix(original_labels[obj_i], np.argmax(avg_probs, axis=1), parts)]
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
# Save the best/worst segmentations per class
# *******************************************
print('Saving test examples')
t1 = time.time()
# Regroup confusions per object class
Confs = np.array(Confs)
obj_mIoUs = []
for l in dataset.label_values:
# Get confusions for this object
obj_inds = np.where(dataset.input_labels['test'] == l)[0]
obj_confs = np.stack(Confs[obj_inds])
# Get IoU
obj_IoUs = IoU_from_confusions(obj_confs)
obj_mIoUs += [np.mean(obj_IoUs, axis=-1)]
# Get X best and worst prediction
order = np.argsort(obj_mIoUs[-1])
worst_inds = obj_inds[order[:num_saves]]
best_inds = obj_inds[order[:-num_saves-1:-1]]
worst_IoUs = obj_IoUs[order[:num_saves]]
best_IoUs = obj_IoUs[order[:-num_saves-1:-1]]
# Save the names in a file
obj_path = join(test_path, dataset.label_to_names[l])
if not exists(obj_path):
makedirs(obj_path)
worst_file = join(obj_path, 'worst_inds.txt')
best_file = join(obj_path, 'best_inds.txt')
with open(worst_file, "w") as text_file:
for w_i, w_IoUs in zip(worst_inds, worst_IoUs):
text_file.write('{:d} {:s} :'.format(w_i, test_names[w_i]))
for IoU in w_IoUs:
text_file.write(' {:.1f}'.format(100*IoU))
text_file.write('\n')
with open(best_file, "w") as text_file:
for b_i, b_IoUs in zip(best_inds, best_IoUs):
text_file.write('{:d} {:s} :'.format(b_i, test_names[b_i]))
for IoU in b_IoUs:
text_file.write(' {:.1f}'.format(100*IoU))
text_file.write('\n')
# Save the clouds
for i, w_i in enumerate(worst_inds):
filename = join(obj_path, 'worst_{:02d}.ply'.format(i+1))
preds = np.argmax(average_predictions[w_i], axis=1).astype(np.int32)
write_ply(filename,
[original_points[w_i], original_labels[w_i], preds],
['x', 'y', 'z', 'gt', 'pre'])
for i, b_i in enumerate(best_inds):
filename = join(obj_path, 'best_{:02d}.ply'.format(i+1))
preds = np.argmax(average_predictions[b_i], axis=1).astype(np.int32)
write_ply(filename,
[original_points[b_i], original_labels[b_i], preds],
['x', 'y', 'z', 'gt', 'pre'])
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
# Display results
# ***************
objs_average = [np.mean(mIoUs) for mIoUs in obj_mIoUs]
instance_average = np.mean(np.hstack(obj_mIoUs))
class_average = np.mean(objs_average)
print('Objs | Inst | Air Bag Cap Car Cha Ear Gui Kni Lam Lap Mot Mug Pis Roc Ska Tab')
print('-----|------|--------------------------------------------------------------------------------')
s = '{:4.1f} | {:4.1f} | '.format(100 * class_average, 100 * instance_average)
for AmIoU in objs_average:
s += '{:4.1f} '.format(100 * AmIoU)
print(s + '\n')
# Initialise iterator with test data
self.sess.run(dataset.test_init_op)
return
def test_cloud_segmentation(self, model, dataset, num_votes=100):
##########
# Initiate
##########
# Smoothing parameter for votes
test_smooth = 0.98
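        # votes are accumulated below as an exponential moving average:
        # p_new = test_smooth * p_old + (1 - test_smooth) * p_batch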
# Initialise iterator with train data
self.sess.run(dataset.test_init_op)
# Initiate global prediction over test clouds
nc_model = model.config.num_classes
self.test_probs = [np.zeros((l.data.shape[0], nc_model), dtype=np.float32) for l in dataset.input_trees['test']]
# Test saving path
if model.config.saving:
test_path = join('/raid/workspace/fan/res/SH/test', model.saving_path.split('/')[-1])
if not exists(test_path):
makedirs(test_path)
if not exists(join(test_path, 'predictions')):
makedirs(join(test_path, 'predictions'))
if not exists(join(test_path, 'probs')):
makedirs(join(test_path, 'probs'))
else:
test_path = None
#####################
# Network predictions
#####################
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
many_runs_timeline = TimeLiner()
i0 = 0
epoch_ind = 0
last_min = -0.5
mean_dt = np.zeros(2)
last_display = time.time()
while last_min < num_votes:
try:
# Run one step of the model.
t = [time.time()]
ops = (self.prob_logits,
model.labels,
model.inputs['in_batches'],
model.inputs['point_inds'],
model.inputs['cloud_inds'])
stacked_probs, labels, batches, point_inds, cloud_inds = self.sess.run(ops,
{model.dropout_prob: 1.0})
"""
stacked_probs, labels, batches, point_inds, cloud_inds = self.sess.run(ops,
{model.dropout_prob: 1.0},
options=options,
run_metadata=run_metadata)
"""
t += [time.time()]
#fetched_timeline = timeline.Timeline(run_metadata.step_stats)
#chrome_trace = fetched_timeline.generate_chrome_trace_format()
#many_runs_timeline.update_timeline(chrome_trace)
if False:
many_runs_timeline.save('timeline_merged_%d_runs.json' % i0)
a = 1/0
# Get predictions and labels per instance
# ***************************************
# Stack all predictions for each class separately
max_ind = np.max(batches)
for b_i, b in enumerate(batches):
# Eliminate shadow indices
b = b[b < max_ind - 0.5]
# Get prediction (only for the concerned parts)
probs = stacked_probs[b]
inds = point_inds[b]
c_i = cloud_inds[b_i]
# Update current probs in whole cloud
self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1-test_smooth) * probs
# Average timing
t += [time.time()]
#print(batches.shape, stacked_probs.shape, 1000*(t[1] - t[0]), 1000*(t[2] - t[1]))
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Epoch {:3d}, step {:3d} (timings : {:4.2f} {:4.2f}). min potential = {:.1f}'
print(message.format(epoch_ind,
i0,
1000 * (mean_dt[0]),
1000 * (mean_dt[1]),
np.min(dataset.min_potentials['test'])))
i0 += 1
except tf.errors.OutOfRangeError:
# Save predicted cloud
new_min = np.min(dataset.min_potentials['test'])
print('Epoch {:3d}, end. Min potential = {:.1f}'.format(epoch_ind, new_min))
print([np.mean(pots) for pots in dataset.potentials['test']])
if last_min + 2 < new_min:
print('Saving clouds')
# Update last_min
last_min = new_min
# Project predictions
                    print('\nReproject Vote #{:d}'.format(int(np.floor(new_min))))
import argparse
import os, time, datetime
import torch
from torch import nn
import torchvision
import numpy as np
import cv2
import scipy.io
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import torch_geometric.data
import neural_renderer as nr
import dataio
import data_util
import util
import metric
import network
import render
import misc
import sph_harm
parser = argparse.ArgumentParser()
# general
parser.add_argument('--data_root', required=True,
help='Path to directory that holds the object data. See dataio.py for directory structure etc..')
parser.add_argument('--logging_root', type=str, default=None, required=False,
help='Path to directory where to write tensorboard logs and checkpoints.')
# mesh
parser.add_argument('--calib_fp', type=str, default='_/calib.mat', required=False,
help='Path of calibration file.')
parser.add_argument('--calib_format', type=str, default='convert', required=False,
help='Format of calibration file')
parser.add_argument('--obj_high_fp', type=str, default='_/mesh.obj', required=False,
help='Path of high-resolution mesh obj.')
parser.add_argument('--obj_low_fp', type=str, default='_/mesh_7500v.obj', required=False,
help='Path of low-resolution mesh obj.')
parser.add_argument('--obj_gcn_fp', type=str, default='_/mesh_7500v.obj', required=False,
help='Path of mesh obj for gcn.')
parser.add_argument('--tex_fp', type=str, default='_/tex.png', required=False,
help='Path of texture.')
# view datasets
parser.add_argument('--img_size', type=int, default=512,
                    help='Sidelength of generated images. Default 512. Values no larger than the native image resolution are recommended.')
parser.add_argument('--img_gamma', type=float, default=1.0,
help='Image gamma.')
# texture mapper
parser.add_argument('--texture_size', type=int, default=512,
help='Sidelength of neural texture. Default 512.')
parser.add_argument('--texture_num_ch', type=int, default=24,
help='Number of channels for neural texture. Default 24.')
parser.add_argument('--mipmap_level', type=int, default=4, required=False,
help='Mipmap levels for neural texture. Default 4.')
parser.add_argument('--init_tex', default=False, type = lambda x: (str(x).lower() in ['true', '1']),
help='Whether initialize neural texture using reconstructed texture.')
parser.add_argument('--fix_tex', default=False, type = lambda x: (str(x).lower() in ['true', '1']),
help='Whether fix neural texture.')
parser.add_argument('--apply_sh', default=True, type = lambda x: (str(x).lower() in ['true', '1']),
help='Whether apply spherical harmonics to sampled feature maps. Default True.')
# lighting
parser.add_argument('--lp_dir', type=str, default=None, required=False,
help='Path to directory that holds the light probe data.')
parser.add_argument('--sphere_samples_fp', type = str, default='./sphere_samples_4096.mat', required=False,
help='Path to sphere samples.')
parser.add_argument('--sh_lmax', type = int, default=10, required=False,
help='Maximum degrees of SH basis for lighting.')
parser.add_argument('--fix_lighting', default = False, type = lambda x: (str(x).lower() in ['true', '1']),
help='Whether fix lighting params.')
parser.add_argument('--init_lighting', default=True, type = lambda x: (str(x).lower() in ['true', '1']),
help='Whether initialize lighting params.')
parser.add_argument('--lighting_idx', default = None, type = int,
help='Lighting index for training.')
parser.add_argument('--lighting_relight_idx', default = None, type = int,
help='Lighting index for relighting.')
# rendering net
parser.add_argument('--nf0', type=int, default=64,
help='Number of features in outermost layer of U-Net architectures.')
# gcn
parser.add_argument('--in_channels', default=6, type=int, help='the channel size of input point cloud')
parser.add_argument('--kernel_size', default=16, type=int, help='neighbor num (default:16)')
parser.add_argument('--block_type', default='res', type=str, help='graph backbone block type {res, dense}')
parser.add_argument('--conv_type', default='edge', type=str, help='graph conv layer {edge, mr}')
parser.add_argument('--act_type', default='relu', type=str, help='activation layer {relu, prelu, leakyrelu}')
parser.add_argument('--norm_type', default='batch', type=str, help='batch or instance normalization')
parser.add_argument('--bias', default=True, type=bool, help='bias of conv layer, True or False')
parser.add_argument('--n_filters', default=64, type=int, help='number of channels of deep features')
parser.add_argument('--n_blocks', default=20, type=int, help='number of basic blocks')
parser.add_argument('--epsilon', default=0.2, type=float, help='stochastic epsilon for gcn')
parser.add_argument('--stochastic', default=True, type=bool, help='stochastic for gcn, True or False')
parser.add_argument('--out_channels_gcn', default=512, type=int, help='the channel size of output features')
# losses
parser.add_argument('--loss_lighting_weight', type=float, default=1.0)
parser.add_argument('--loss_lighting_uncovered_weight', type=float, default=0.1)
parser.add_argument('--loss_rays_lt_chrom_weight', type=float, default=1.0)
parser.add_argument('--loss_alb_weight', type=float, default=1.0)
# training
parser.add_argument('--max_epoch', type=int, default=2000, help='Maximum number of epochs to train for.')
parser.add_argument('--max_iter', type=int, default=None, help='Maximum number of iterations to train for.')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate.')
parser.add_argument('--sampling_pattern', type=str, default='all', required=False)
parser.add_argument('--batch_size', type=int, default=1, help='Batch size.')
# validation
parser.add_argument('--sampling_pattern_val', type=str, default='all', required=False)
parser.add_argument('--val_freq', type=int, default=1000,
help='Test on validation data every X iterations.')
# misc
parser.add_argument('--exp_name', type=str, default='', help='(optional) Name for experiment.')
parser.add_argument('--gpu_id', type=str, default='', help='Cuda visible devices. First device for gcn, last device for the others.')
parser.add_argument('--start_epoch', type=int, default=0, help='Start epoch')
parser.add_argument('--log_freq', type=int, default=100, help='Save tensorboard logs every X iterations.')
parser.add_argument('--ckp_freq', type=int, default=5000, help='Save checkpoint every X iterations.')
opt = parser.parse_args()
if opt.logging_root is None:
opt.logging_root = os.path.join(opt.data_root, 'logs', 'rnr')
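# paths starting with '_/' are interpreted as relative to opt.data_root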
if opt.calib_fp[:2] == '_/':
opt.calib_fp = os.path.join(opt.data_root, opt.calib_fp[2:])
if opt.obj_high_fp[:2] == '_/':
opt.obj_high_fp = os.path.join(opt.data_root, opt.obj_high_fp[2:])
if opt.obj_low_fp[:2] == '_/':
opt.obj_low_fp = os.path.join(opt.data_root, opt.obj_low_fp[2:])
if opt.obj_gcn_fp[:2] == '_/':
opt.obj_gcn_fp = os.path.join(opt.data_root, opt.obj_gcn_fp[2:])
if opt.tex_fp[:2] == '_/':
opt.tex_fp = os.path.join(opt.data_root, opt.tex_fp[2:])
if opt.lp_dir is not None and opt.lp_dir[:2] == '_/':
opt.lp_dir = os.path.join(opt.data_root, opt.lp_dir[2:])
if opt.sphere_samples_fp[:2] == '_/':
opt.sphere_samples_fp = os.path.join(opt.data_root, opt.sphere_samples_fp[2:])
obj_high_name = opt.obj_high_fp.split('/')[-1].split('.')[0]
obj_low_name = opt.obj_low_fp.split('/')[-1].split('.')[0]
opt.precomp_high_dir = os.path.join(opt.data_root, 'precomp_' + obj_high_name)
opt.precomp_low_dir = os.path.join(opt.data_root, 'precomp_' + obj_low_name)
print('\n'.join(["%s: %s" % (key, value) for key, value in vars(opt).items()]))
# device allocation
if opt.gpu_id == '':
device_gcn = torch.device('cpu')
device = torch.device('cpu')
else:
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_id
device_gcn = torch.device('cuda:' + opt.gpu_id[0])
device = torch.device('cuda:' + opt.gpu_id[-1])
# load global_RT
if opt.calib_format == 'convert':
global_RT = torch.from_numpy(scipy.io.loadmat(opt.calib_fp)['global_RT'].astype(np.float32))
else:
global_RT = None
# load texture of obj
texture_init = cv2.cvtColor(cv2.imread(opt.tex_fp), cv2.COLOR_BGR2RGB)
texture_init_resize = cv2.resize(texture_init, (opt.texture_size, opt.texture_size), interpolation = cv2.INTER_AREA).astype(np.float32) / 255.0
texture_init_use = None
if opt.init_tex is True:
texture_init_use = torch.from_numpy(texture_init_resize)
num_channel = texture_init.shape[-1]
# sample light directions on sphere
l_dir_np = scipy.io.loadmat(opt.sphere_samples_fp)['sphere_samples'].transpose() # [3, num_sample]
l_dir = torch.from_numpy(l_dir_np) # [3, num_sample]
num_sample = l_dir.shape[1]
# handle lighting options
has_lighting_gt = True
if opt.lighting_idx is None:
has_lighting_gt = False
opt.lighting_idx = 0 # store estimated lighting as the first lighting
has_lighting_init = opt.init_lighting
has_lighting_relight = True
if opt.lighting_relight_idx is None:
has_lighting_relight = False
# dataset for training views
if opt.lighting_idx is not None:
img_dir = opt.data_root + '/rgb' + str(opt.lighting_idx) + '/'
else:
img_dir = opt.data_root + '/rgb0/'
view_dataset = dataio.ViewDataset(root_dir = opt.data_root,
img_dir = img_dir,
calib_path = opt.calib_fp,
calib_format = opt.calib_format,
img_size = [opt.img_size, opt.img_size],
sampling_pattern = opt.sampling_pattern,
load_precompute = True,
precomp_high_dir = opt.precomp_high_dir,
precomp_low_dir = opt.precomp_low_dir,
img_gamma = opt.img_gamma,
)
# dataset for relighted training views
img_relight_dir = opt.data_root + '/rgb' + str(opt.lighting_relight_idx) + '/'
if os.path.isdir(img_relight_dir):
view_dataset_relight = dataio.ViewDataset(root_dir = opt.data_root,
img_dir = img_relight_dir,
calib_path = opt.calib_fp,
calib_format = opt.calib_format,
img_size = [opt.img_size, opt.img_size],
sampling_pattern = opt.sampling_pattern,
img_gamma = opt.img_gamma,
)
has_view_relight = has_lighting_relight and ('view_dataset_relight' in globals())
# dataset for validation views
view_val_dataset = dataio.ViewDataset(root_dir = opt.data_root,
img_dir = img_dir,
calib_path = opt.calib_fp,
calib_format = opt.calib_format,
img_size = [opt.img_size, opt.img_size],
sampling_pattern = opt.sampling_pattern_val,
load_precompute = True,
precomp_high_dir = opt.precomp_high_dir,
precomp_low_dir = opt.precomp_low_dir,
img_gamma = opt.img_gamma,
)
num_view_val = len(view_val_dataset)
# dataset for relighted validation views
if os.path.isdir(img_relight_dir):
view_val_dataset_relight = dataio.ViewDataset(root_dir = opt.data_root,
img_dir = img_relight_dir,
calib_path = opt.calib_fp,
calib_format = opt.calib_format,
img_size = [opt.img_size, opt.img_size],
sampling_pattern = opt.sampling_pattern_val,
img_gamma = opt.img_gamma,
)
# dataset loader for light probes
if opt.lp_dir is not None:
lp_dataset = dataio.LightProbeDataset(data_dir = opt.lp_dir)
print('Start buffering light probe data...')
lp_dataset.buffer_all()
lp_dataloader = DataLoader(lp_dataset, batch_size = 1, shuffle = False, num_workers = 8)
else:
lp_dataloader = None
# interpolater
interpolater = network.Interpolater()
# texture mapper
texture_mapper = network.TextureMapper(texture_size = opt.texture_size,
texture_num_ch = opt.texture_num_ch,
mipmap_level = opt.mipmap_level,
texture_init = texture_init_use,
fix_texture = opt.fix_tex,
apply_sh = opt.apply_sh)
# gcn input
v_attr, f_attr = nr.load_obj(opt.obj_gcn_fp, normalization = False, use_cuda = False)
gcn_input = torch_geometric.data.Data(pos = v_attr['v'], x = v_attr['v']).to(device_gcn)
opt.num_v_gcn = v_attr['v'].shape[0]
# deep_gcn
gcn = network.DenseDeepGCN(opt)
# lighting model lp
if lp_dataloader is not None:
lighting_model_lp = network.LightingLP(l_dir, num_channel = num_channel, lp_dataloader = lp_dataloader, fix_params = opt.fix_lighting)
lighting_model_lp.fit_sh(lmax = opt.sh_lmax)
# lighting model sh
if 'lighting_model_lp' in globals():
lighting_model_sh = network.LightingSH(l_dir, lmax = opt.sh_lmax, num_lighting = lighting_model_lp.num_lighting, num_channel = num_channel, init_coeff = lighting_model_lp.sh_coeff, fix_params = opt.fix_lighting, lp_recon_h = 256, lp_recon_w = 512)
else:
lighting_model_sh = network.LightingSH(l_dir, lmax = opt.sh_lmax, num_lighting = 1, num_channel = num_channel, fix_params = opt.fix_lighting, lp_recon_h = 256, lp_recon_w = 512)
lighting_model = lighting_model_sh
#################### process lighting ####################
# load stitched light probes
if opt.lighting_idx is None:
idx_use = 0
else:
idx_use = opt.lighting_idx
lp_stitch_dir = os.path.join(opt.data_root, 'light_probe_stitch_' + opt.sampling_pattern)
if os.path.isfile(os.path.join(lp_stitch_dir, str(idx_use) + '.exr')):
lp_stitch = cv2.imread(os.path.join(lp_stitch_dir, str(idx_use) + '.exr'), cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
else:
lp_stitch = cv2.imread(os.path.join(lp_stitch_dir, str(idx_use) + '.png'), cv2.IMREAD_UNCHANGED)[:, :, :3].astype(np.float32) / 255.
lp_stitch[np.isnan(lp_stitch)] = 0
lp_stitch = cv2.cvtColor(lp_stitch, cv2.COLOR_BGR2RGB) ** opt.img_gamma
lp_stitch_mask = cv2.imread(os.path.join(lp_stitch_dir, 'mask', str(idx_use) + '.png')).astype(np.float32) / 255.0
lp_stitch_count = scipy.io.loadmat(os.path.join(lp_stitch_dir, 'count', str(idx_use) + '.mat'))
lp_stitch_count = lp_stitch_count['count'].astype(np.float32) / lp_stitch_count['num_view'].astype(np.float32)
# fill in missing regions
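# (uncovered pixels in each channel are replaced by the mean of that channel's covered pixels)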
for ith_ch in range(num_channel):
lp_stitch[lp_stitch_mask[:, :, ith_ch] == 0, ith_ch] = lp_stitch[lp_stitch_mask[:, :, ith_ch] == 1, ith_ch].mean()
# resize
lp_stitch_resize = cv2.resize(lp_stitch, (lighting_model_sh.lp_recon_w, lighting_model_sh.lp_recon_h), interpolation = cv2.INTER_AREA)
lp_stitch_mask_resize = cv2.resize(lp_stitch_mask, (lighting_model_sh.lp_recon_w, lighting_model_sh.lp_recon_h), interpolation = cv2.INTER_AREA)
lp_stitch_count_resize = cv2.resize(lp_stitch_count, (lighting_model_sh.lp_recon_w, lighting_model_sh.lp_recon_h), interpolation = cv2.INTER_AREA)
# convert to pytorch tensors
lp_stitch = torch.from_numpy(lp_stitch)
lp_stitch_mask = torch.from_numpy(lp_stitch_mask) == 1
lp_stitch_count = torch.from_numpy(lp_stitch_count)
lp_stitch_resize = torch.from_numpy(lp_stitch_resize).to(device)
lp_stitch_mask_resize = (torch.from_numpy(lp_stitch_mask_resize) == 1).to(device)
lp_stitch_count_resize = torch.from_numpy(lp_stitch_count_resize).to(device)
# fit sh to lp_stitch
l_samples_uv = render.spherical_mapping(l_dir) # [2, num_sample]
l_samples_lp_stitch = misc.interpolate_bilinear(lp_stitch, (l_samples_uv[None, 0, :] * float(lp_stitch.shape[1])).clamp(max = lp_stitch.shape[1] - 1), (l_samples_uv[None, 1, :] * float(lp_stitch.shape[0])).clamp(max = lp_stitch.shape[0] - 1))[0, :] # [num_sample, num_channel]
l_samples_mask = misc.interpolate_bilinear(lp_stitch_mask.to(torch.float32), (l_samples_uv[None, 0, :] * float(lp_stitch.shape[1])).clamp(max = lp_stitch.shape[1] - 1), (l_samples_uv[None, 1, :] * float(lp_stitch.shape[0])).clamp(max = lp_stitch.shape[0] - 1))[0, :, 0] == 1 # [num_sample]
lp_stitch_sh_coeff = sph_harm.fit_sh_coeff(samples = l_samples_lp_stitch, sh_basis_val = lighting_model_sh.basis_val) # [num_basis, num_channel]
# lighting gt (sh and reconstructed lp)
if has_lighting_gt:
lighting_sh_coeff_gt = lighting_model_sh.coeff.data[opt.lighting_idx, :].clone().to(device) # [num_basis, num_channel]
lp_gt = lighting_model_sh.reconstruct_lp(lighting_sh_coeff_gt.cpu()).to(device)
# lighting stitch (sh and reconstructed lp)
lighting_sh_coeff_stitch = lp_stitch_sh_coeff.to(device)
lp_stitch_sh_recon = lighting_model_sh.reconstruct_lp(lighting_sh_coeff_stitch.cpu()).to(device) # [H, W, C]
# initialize lighting
if has_lighting_init:
lighting_sh_coeff_init = lighting_sh_coeff_stitch.clone() # [num_basis, num_channel]
lighting_model_sh.coeff.data[opt.lighting_idx, :] = lighting_sh_coeff_init # initialize
lp_init = lighting_model_sh.reconstruct_lp(lighting_sh_coeff_init.cpu()).to(device)
l_samples_init = l_samples_lp_stitch.clone().to(device)
l_samples_init_mask = l_samples_mask.clone().to(device)
else:
lighting_model_sh.coeff.data[opt.lighting_idx, :] = 0.1 # reset lighting params, don't set to zero (normalize_factor will be nan)
# get lighting data for relight
if has_lighting_relight:
l_samples_relight_lp = lighting_model_lp.l_samples.data[opt.lighting_relight_idx, :].to(device) # [num_sample, num_channel]
lp_relight_lp = lighting_model_lp.lps[opt.lighting_relight_idx, :].to(device) # [H, W, C]
l_samples_relight_sh = lighting_model_sh.l_samples.data[opt.lighting_relight_idx, :].to(device) # [num_sample, num_channel]
lp_relight_sh = lighting_model_sh.reconstruct_lp(lighting_model_sh.coeff.data[opt.lighting_relight_idx, :]).to(device) # [H, W, C]
l_samples_relight = l_samples_relight_sh
lp_relight = lp_relight_sh
########################################
# ray sampler specular
opt.num_azi = 6
opt.num_polar = 2
opt.interval_polar = 5
ray_sampler = network.RaySampler(num_azi = opt.num_azi, num_polar = opt.num_polar, interval_polar = opt.interval_polar)
num_ray = ray_sampler.num_ray
# ray sampler diffuse
opt.num_azi = 6
opt.num_polar = 2
opt.interval_polar = 10
ray_sampler_diffuse = network.RaySampler(num_azi = opt.num_azi, num_polar = opt.num_polar, interval_polar = opt.interval_polar, mode = 'diffuse')
num_ray_diffuse = ray_sampler_diffuse.num_ray
num_ray_total = num_ray + num_ray_diffuse
# rendering net
render_net = network.RenderingNet(nf0 = opt.nf0,
in_channels = num_ray_total * 3 + 6 + opt.texture_num_ch,
out_channels = 3 * num_ray_total,
num_down_unet = 5,
out_channels_gcn = opt.out_channels_gcn)
# ray renderer
ray_renderer = network.RayRenderer(lighting_model, interpolater)
# L1 loss
criterionL1 = nn.L1Loss(reduction = 'mean').to(device)
# Chrom loss
criterion_rays_lt_chrom = network.RaysLTChromLoss().to(device)
# Optimizer
optimizerG = torch.optim.Adam(list(gcn.parameters()) + list(texture_mapper.parameters()) + list(lighting_model.parameters()) + list(render_net.parameters()), lr = opt.lr)
optimizerG.zero_grad()
# move to device
interpolater.to(device)
texture_mapper.to(device)
lighting_model.to(device)
ray_sampler.to(device)
ray_sampler_diffuse.to(device)
render_net.to(device)
ray_renderer.to(device)
gcn.to(device_gcn)
# get module
texture_mapper_module = texture_mapper
lighting_model_module = lighting_model
ray_sampler_module = ray_sampler
ray_sampler_diffuse_module = ray_sampler_diffuse
render_net_module = render_net
gcn_module = gcn
# set to training mode
interpolater.train()
texture_mapper.train()
lighting_model.train()
ray_sampler.train()
ray_sampler_diffuse.train()
render_net.train()
ray_renderer.train()
gcn.train()
# collect all networks and optimizers
part_list = [texture_mapper_module, lighting_model_module, ray_sampler_module, ray_sampler_diffuse_module, render_net_module, gcn_module, []]
part_name_list = ['texture_mapper', 'lighting_model', 'ray_sampler', 'ray_sampler_diffuse', 'render_net', 'gcn', 'v_feature']
print("*" * 100)
print("Number of parameters")
print("texture mapper:")
opt.num_params_texture_mapper = util.print_network(texture_mapper)
print("lighting model:")
opt.num_params_lighting_model = util.print_network(lighting_model)
print("render net:")
opt.num_params_render_net = util.print_network(render_net)
print("gcn:")
opt.num_params_gcn = util.print_network(gcn)
print("*" * 100)
def main():
print('Start buffering data for training views...')
view_dataset.buffer_all()
view_dataloader = DataLoader(view_dataset, batch_size = opt.batch_size, shuffle = True, num_workers = 8)
if has_view_relight:
print('Start buffering data for relighted training views...')
view_dataset_relight.buffer_all()
print('Start buffering data for validation views...')
view_val_dataset.buffer_all()
view_val_dataloader = DataLoader(view_val_dataset, batch_size = opt.batch_size, shuffle = False, num_workers = 8)
if has_view_relight:
print('Start buffering data for relighted validation views...')
view_val_dataset_relight.buffer_all()
# directory name contains some info about hyperparameters.
dir_name = os.path.join(datetime.datetime.now().strftime('%m-%d') +
'_' + datetime.datetime.now().strftime('%H-%M-%S') +
'_' + opt.sampling_pattern +
'_' + opt.data_root.strip('/').split('/')[-1])
    if opt.exp_name != '':
dir_name += '_' + opt.exp_name
# directory for logging
log_dir = os.path.join(opt.logging_root, dir_name)
data_util.cond_mkdir(log_dir)
# directory for saving validation data on view synthesis
val_out_dir = os.path.join(log_dir, 'val_out')
val_gt_dir = os.path.join(log_dir, 'val_gt')
val_err_dir = os.path.join(log_dir, 'val_err')
data_util.cond_mkdir(val_out_dir)
data_util.cond_mkdir(val_gt_dir)
data_util.cond_mkdir(val_err_dir)
# directory for saving validation data on relighting
val_relight_out_dir = os.path.join(log_dir, 'val_relight_out')
data_util.cond_mkdir(val_relight_out_dir)
if has_view_relight:
val_relight_gt_dir = os.path.join(log_dir, 'val_relight_gt')
val_relight_err_dir = os.path.join(log_dir, 'val_relight_err')
data_util.cond_mkdir(val_relight_gt_dir)
data_util.cond_mkdir(val_relight_err_dir)
# Save all command line arguments into a txt file in the logging directory for later reference.
with open(os.path.join(log_dir, "params.txt"), "w") as out_file:
out_file.write('\n'.join(["%s: %s" % (key, value) for key, value in vars(opt).items()]))
# tensorboardX writer
writer = SummaryWriter(log_dir)
iter = opt.start_epoch * len(view_dataset)
print('Begin training...')
val_log_batch_id = 0
first_val = True
for epoch in range(opt.start_epoch, opt.max_epoch):
for view_trgt in view_dataloader:
if opt.max_iter is not None and iter >= opt.max_iter:
return
start = time.time()
# gcn features
v_feature = gcn(gcn_input).to(device)
# get view data
TBN_map = view_trgt[0]['TBN_map'].to(device) # [N, H, W, 3, 3]
uv_map = view_trgt[0]['uv_map'].to(device) # [N, H, W, 2]
sh_basis_map = view_trgt[0]['sh_basis_map'].to(device) # [N, H, W, 9]
normal_map = view_trgt[0]['normal_map'].to(device) # [N, H, W, 3]
view_dir_map = view_trgt[0]['view_dir_map'].to(device) # [N, H, W, 3]
view_dir_map_tangent = view_trgt[0]['view_dir_map_tangent'].to(device) # [N, H, W, 3]
alpha_map = view_trgt[0]['alpha_map'][:, None, :, :].to(device) # [N, 1, H, W]
view_idx = view_trgt[0]['idx']
batch_size = alpha_map.shape[0]
img_h = alpha_map.shape[2]
img_w = alpha_map.shape[3]
num_view = len(view_trgt)
img_gt = []
for i in range(num_view):
img_gt.append(view_trgt[i]['img_gt'].to(device)) # [N, C, H, W]
# sample texture
neural_img = texture_mapper(uv_map, sh_basis_map, sh_start_ch = 6) # [N, C, H, W]
albedo_diffuse = neural_img[:, :3, :, :]
albedo_specular = neural_img[:, 3:6, :, :]
# sample specular rays
rays_dir, rays_uv, rays_dir_tangent = ray_sampler(TBN_map, view_dir_map_tangent, alpha_map.permute((0, 2, 3, 1))) # [N, H, W, 3, num_ray], [N, H, W, 2, num_ray], [N, H, W, 3, num_ray]
num_ray = rays_uv.shape[-1]
# sample diffuse rays
rays_diffuse_dir, rays_diffuse_uv, _ = ray_sampler_diffuse(TBN_map, view_dir_map_tangent, alpha_map.permute((0, 2, 3, 1))) # [N, H, W, 3, num_ray], [N, H, W, 2, num_ray]
num_ray_diffuse = rays_diffuse_uv.shape[-1]
num_ray_total = num_ray + num_ray_diffuse
# concat data
rays_dir = torch.cat((rays_dir, rays_diffuse_dir), dim = -1)
rays_uv = torch.cat((rays_uv, rays_diffuse_uv), dim = -1)
# estimate light transport for rays
render_net_input = torch.cat((rays_dir.permute((0, -1, -2, 1, 2)).reshape((batch_size, -1, img_h, img_w)),
normal_map.permute((0, 3, 1, 2)),
view_dir_map.permute((0, 3, 1, 2)),
neural_img), dim = 1)
rays_lt = render_net(render_net_input, v_feature).reshape((batch_size, num_ray_total, -1, img_h, img_w)) # [N, num_ray, C, H, W]
lt_max_val = 2.0
rays_lt = (rays_lt * 0.5 + 0.5) * lt_max_val # map to [0, lt_max_val]
# render using ray_renderer
outputs_final, _, _, _, _, _, _ = ray_renderer(albedo_specular, rays_uv, rays_lt, lighting_idx = opt.lighting_idx, albedo_diffuse = albedo_diffuse, num_ray_diffuse = num_ray_diffuse, seperate_albedo = True)
outputs_final = [outputs_final] # [N, C, H, W]
with torch.no_grad():
# relight
if has_lighting_relight:
# ray renderer
outputs_final_relight, _, _, _, _, _, _ = ray_renderer(albedo_specular, rays_uv, rays_lt, lp = lp_relight[None, :].expand(batch_size, -1, -1, -1), albedo_diffuse = albedo_diffuse, num_ray_diffuse = num_ray_diffuse, seperate_albedo = True)
outputs_final_relight = [outputs_final_relight] # [N, C, H, W]
# relight gt
if has_view_relight:
img_relight_gt = []
for i in range(batch_size):
img_relight_gt.append(view_dataset_relight.views_all[view_idx[i]]['img_gt'])
img_relight_gt = torch.stack(img_relight_gt).to(device)
img_relight_gt = [img_relight_gt]
# get estimated lighting SH coeffs
lighting_sh_coeff_est = lighting_model_module.get_lighting_params(opt.lighting_idx) # [num_basis, num_channel]
# reconstruct light probe
lp_est = lighting_model_module.reconstruct_lp(lighting_sh_coeff_est)
# reconstruct light samples
l_samples_est = sph_harm.reconstruct_sh(lighting_sh_coeff_est, lighting_model_module.basis_val)
            # We don't enforce a loss on the outermost 5 pixels (alleviates boundary errors); we also weight the loss by alpha
alpha_map_central = alpha_map[:, :, 5:-5, 5:-5]
img_gt_orig = img_gt[0].clone()
for i in range(num_view):
outputs_final[i] = outputs_final[i][:, :, 5:-5, 5:-5] * alpha_map_central
img_gt[i] = img_gt[i][:, :, 5:-5, 5:-5] * alpha_map_central
if has_lighting_relight:
outputs_final_relight[i] = outputs_final_relight[i][:, :, 5:-5, 5:-5] * alpha_map_central
if has_view_relight:
img_relight_gt[i] = img_relight_gt[i][:, :, 5:-5, 5:-5] * alpha_map_central
loss_lighting = 0
if not opt.fix_lighting:
# loss on estimated light samples
loss_lighting = (l_samples_init[l_samples_init_mask, :] - l_samples_est[l_samples_init_mask, :]).abs().sum() / l_samples_init_mask.to(l_samples_est.dtype).sum() * opt.loss_lighting_weight
loss_lighting = loss_lighting + (l_samples_init[(l_samples_init_mask != 1), :] - l_samples_est[(l_samples_init_mask != 1), :]).abs().sum() / (l_samples_init_mask != 1).to(l_samples_est.dtype).sum() * opt.loss_lighting_uncovered_weight
# loss on final img
loss_rn = list()
for idx in range(num_view):
loss_rn.append(criterionL1(outputs_final[idx].contiguous().view(-1).float(), img_gt[idx].view(-1).float()))
loss_rn = torch.stack(loss_rn, dim = 0).mean()
# loss on rays light transport chromaticity
try:
loss_rays_lt_chrom, rays_lt_chrom, rays_lt_chrom_mean, rays_lt_chrom_diff = criterion_rays_lt_chrom(rays_lt, alpha_map, img_gt_orig)
except:
loss_rays_lt_chrom, rays_lt_chrom, rays_lt_chrom_mean, rays_lt_chrom_diff = criterion_rays_lt_chrom.cpu()(rays_lt.cpu(), alpha_map.cpu(), img_gt_orig.cpu())
loss_rays_lt_chrom = loss_rays_lt_chrom.to(device)
loss_rays_lt_chrom = loss_rays_lt_chrom * opt.loss_rays_lt_chrom_weight
# loss on albedo mean value
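            # penalizes |mean albedo - 0.5| over valid texels, separately for the specular and diffuse maps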
albedo_specular_tex = texture_mapper_module.flatten_mipmap(start_ch = 3, end_ch = 6) # [1, H, W, C]
albedo_diffuse_tex = texture_mapper_module.flatten_mipmap(start_ch = 0, end_ch = 3) # [1, H, W, C]
mask_valid_tex_spec = (albedo_specular_tex != texture_mapper_module.tex_flatten_mipmap_init[..., 3:6]).any(dim = -1, keepdim = True).to(albedo_specular_tex.dtype)
if mask_valid_tex_spec.sum(dim = (0, 1, 2)) == 0:
loss_alb_spec = torch.zeros(1).to(device)
else:
loss_alb_spec = ((albedo_specular_tex * mask_valid_tex_spec).sum(dim = (0, 1, 2)) / mask_valid_tex_spec.sum(dim = (0, 1, 2)) - 0.5).abs().sum() / num_channel
mask_valid_tex_diff = (albedo_diffuse_tex != texture_mapper_module.tex_flatten_mipmap_init[..., 0:3]).any(dim = -1, keepdim = True).to(albedo_diffuse_tex.dtype)
if mask_valid_tex_diff.sum(dim = (0, 1, 2)) == 0:
loss_alb_diff = torch.zeros(1).to(device)
else:
loss_alb_diff = ((albedo_diffuse_tex * mask_valid_tex_diff).sum(dim = (0, 1, 2)) / mask_valid_tex_diff.sum(dim = (0, 1, 2)) - 0.5).abs().sum() / num_channel
loss_alb = (loss_alb_spec + loss_alb_diff) * opt.loss_alb_weight
# total loss
loss_g = loss_lighting + loss_rn + loss_rays_lt_chrom + loss_alb
# compute gradients
optimizer_step = True
if not optimizer_step:
loss_g.backward(retain_graph = True)
else:
loss_g.backward(retain_graph = False)
# optimize
if optimizer_step:
optimizerG.step()
optimizerG.zero_grad()
# error metrics
with torch.no_grad():
err_metrics_batch_i_final = metric.compute_err_metrics_batch(outputs_final[0] * 255.0, img_gt[0] * 255.0, alpha_map_central, compute_ssim = False)
if has_view_relight:
err_metrics_batch_i_final_relight = metric.compute_err_metrics_batch(outputs_final_relight[0] * 255.0, img_relight_gt[0] * 255.0, alpha_map_central, compute_ssim = False)
if has_lighting_gt:
lighting_sh_coeff_mae = (lighting_sh_coeff_gt.to(lighting_sh_coeff_est.dtype) - lighting_sh_coeff_est).abs().sum()
err_metrics_batch_i_lp = metric.compute_err_metrics_batch(lp_est.permute((2, 0, 1))[None, :] * 255.0, lp_gt.to(lp_est.dtype).permute((2, 0, 1))[None, :] * 255.0, torch.ones_like(lp_est).permute((2, 0, 1))[None, :], compute_ssim = False)
# tensorboard scalar logs of training data
if optimizer_step:
writer.add_scalar("loss_g", loss_g, iter)
writer.add_scalar("loss_lighting", loss_lighting, iter)
writer.add_scalar("loss_rn", loss_rn, iter)
writer.add_scalar("loss_rays_lt_chrom", loss_rays_lt_chrom, iter)
writer.add_scalar("loss_alb", loss_alb, iter)
writer.add_scalar("final_mae_valid", err_metrics_batch_i_final['mae_valid_mean'], iter)
writer.add_scalar("final_psnr_valid", err_metrics_batch_i_final['psnr_valid_mean'], iter)
if has_view_relight:
writer.add_scalar("final_relight_mae_valid", err_metrics_batch_i_final_relight['mae_valid_mean'], iter)
writer.add_scalar("final_relight_psnr_valid", err_metrics_batch_i_final_relight['psnr_valid_mean'], iter)
if has_lighting_gt:
writer.add_scalar("lighting_sh_coeff_mae", lighting_sh_coeff_mae, iter)
writer.add_scalar("lp_mae_valid", err_metrics_batch_i_lp['mae_valid_mean'], iter)
writer.add_scalar("lp_psnr_valid", err_metrics_batch_i_lp['psnr_valid_mean'], iter)
end = time.time()
print("Iter %07d Epoch %03d loss_g %0.4f mae_valid %0.4f psnr_valid %0.4f t_total %0.4f" % (iter, epoch, loss_g, err_metrics_batch_i_final['mae_valid_mean'], err_metrics_batch_i_final['psnr_valid_mean'], end - start))
# tensorboard figure logs of training data
if not iter % opt.log_freq:
output_final_vs_gt = []
for i in range(num_view):
output_final_vs_gt.append(outputs_final[i].clamp(min = 0., max = 1.))
output_final_vs_gt.append(img_gt[i].clamp(min = 0., max = 1.))
output_final_vs_gt.append((outputs_final[i] - img_gt[i]).abs().clamp(min = 0., max = 1.))
output_final_vs_gt = torch.cat(output_final_vs_gt, dim = 0)
writer.add_image("output_final_vs_gt",
torchvision.utils.make_grid(output_final_vs_gt,
nrow = batch_size,
range = (0, 1),
scale_each = False,
normalize = False).cpu().detach().numpy(),
iter)
lp_init_est_gt = []
if has_lighting_init:
lp_init_est_gt.append(lp_init.to(lp_est.dtype).permute((2, 0, 1))[None, :].clamp(min = 0., max = 1.))
lp_init_est_gt.append(lp_est.permute((2, 0, 1))[None, :].clamp(min = 0., max = 1.))
if has_lighting_gt:
lp_init_est_gt.append(lp_gt.to(lp_est.dtype).permute((2, 0, 1))[None, :].clamp(min = 0., max = 1.))
lp_init_est_gt.append((lp_est - lp_gt.to(lp_est.dtype)).abs().permute((2, 0, 1))[None, :].clamp(min = 0., max = 1.))
lp_init_est_gt = torch.cat(lp_init_est_gt, dim = 0)
writer.add_image("lp_init_est_gt",
torchvision.utils.make_grid(lp_init_est_gt,
nrow = 1,
range = (0, 1),
scale_each = False,
normalize = False).cpu().detach().numpy(),
iter)
if has_lighting_relight:
relight_final_est_gt = []
for i in range(num_view):
relight_final_est_gt.append(outputs_final_relight[i].clamp(min = 0., max = 1.))
if has_view_relight:
relight_final_est_gt.append(img_relight_gt[i].clamp(min = 0., max = 1.))
relight_final_est_gt.append((outputs_final_relight[i] - img_relight_gt[i]).abs().clamp(min = 0., max = 1.))
relight_final_est_gt = torch.cat(relight_final_est_gt, dim = 0)
writer.add_image("relight_final_est_gt",
torchvision.utils.make_grid(relight_final_est_gt,
nrow = batch_size,
range = (0, 1),
scale_each = False,
normalize = False).cpu().detach().numpy(),
iter)
# validation
if not iter % opt.val_freq:
start_val = time.time()
with torch.no_grad():
# error metrics
err_metrics_val = {}
err_metrics_val['mae_valid'] = []
err_metrics_val['mse_valid'] = []
err_metrics_val['psnr_valid'] = []
err_metrics_val['ssim_valid'] = []
err_metrics_val_relight = {}
err_metrics_val_relight['mae_valid'] = []
err_metrics_val_relight['mse_valid'] = []
err_metrics_val_relight['psnr_valid'] = []
err_metrics_val_relight['ssim_valid'] = []
# gcn features
v_feature = gcn(gcn_input).to(device)
# loop over batches
batch_id = 0
for view_val_trgt in view_val_dataloader:
start_val_i = time.time()
# get view data
TBN_map = view_val_trgt[0]['TBN_map'].to(device) # [N, H, W, 3, 3]
uv_map = view_val_trgt[0]['uv_map'].to(device) # [N, H, W, 2]
sh_basis_map = view_val_trgt[0]['sh_basis_map'].to(device) # [N, H, W, 9]
normal_map = view_val_trgt[0]['normal_map'].to(device) # [N, H, W, 3]
view_dir_map = view_val_trgt[0]['view_dir_map'].to(device) # [N, H, W, 3]
view_dir_map_tangent = view_val_trgt[0]['view_dir_map_tangent'].to(device) # [N, H, W, 3]
alpha_map = view_val_trgt[0]['alpha_map'][:, None, :, :].to(device) # [N, 1, H, W]
view_idx = view_val_trgt[0]['idx']
batch_size = alpha_map.shape[0]
img_h = alpha_map.shape[2]
img_w = alpha_map.shape[3]
num_view = len(view_val_trgt)
img_gt = []
for i in range(num_view):
img_gt.append(view_val_trgt[i]['img_gt'].to(device)) # [N, C, H, W]
# sample texture
neural_img = texture_mapper(uv_map, sh_basis_map, sh_start_ch = 6) # [N, C, H, W]
albedo_diffuse = neural_img[:, :3, :, :]
albedo_specular = neural_img[:, 3:6, :, :]
# sample specular rays
rays_dir, rays_uv, rays_dir_tangent = ray_sampler(TBN_map, view_dir_map_tangent, alpha_map.permute((0, 2, 3, 1))) # [N, H, W, 3, num_ray], [N, H, W, 2, num_ray], [N, H, W, 3, num_ray]
num_ray = rays_uv.shape[-1]
# sample diffuse rays
rays_diffuse_dir, rays_diffuse_uv, _ = ray_sampler_diffuse(TBN_map, view_dir_map_tangent, alpha_map.permute((0, 2, 3, 1))) # [N, H, W, 3, num_ray], [N, H, W, 2, num_ray]
num_ray_diffuse = rays_diffuse_uv.shape[-1]
num_ray_total = num_ray + num_ray_diffuse
# concat data
rays_dir = torch.cat((rays_dir, rays_diffuse_dir), dim = -1)
rays_uv = torch.cat((rays_uv, rays_diffuse_uv), dim = -1)
# estimate light transport for rays
render_net_input = torch.cat((rays_dir.permute((0, -1, -2, 1, 2)).reshape((batch_size, -1, img_h, img_w)),
normal_map.permute((0, 3, 1, 2)),
view_dir_map.permute((0, 3, 1, 2)),
neural_img), dim = 1)
rays_lt = render_net(render_net_input, v_feature).reshape((batch_size, num_ray_total, -1, img_h, img_w)) # [N, num_ray, C, H, W]
rays_lt = (rays_lt * 0.5 + 0.5) * lt_max_val # map to [0, lt_max_val]
outputs_final, _, _, _, _, _, _ = ray_renderer(albedo_specular, rays_uv, rays_lt, lighting_idx = opt.lighting_idx, albedo_diffuse = albedo_diffuse, num_ray_diffuse = num_ray_diffuse, seperate_albedo = True)
outputs_final = [outputs_final] # [N, C, H, W]
# relight
if has_lighting_relight:
# ray renderer
outputs_final_relight, _, _, _, _, _, _ = ray_renderer(albedo_specular, rays_uv, rays_lt, lp = lp_relight[None, :].expand(batch_size, -1, -1, -1), albedo_diffuse = albedo_diffuse, num_ray_diffuse = num_ray_diffuse, seperate_albedo = True)
outputs_final_relight = [outputs_final_relight] # [N, C, H, W]
# relight gt
if has_view_relight:
img_relight_gt = []
for i in range(batch_size):
img_relight_gt.append(view_val_dataset_relight.views_all[view_idx[i]]['img_gt'])
img_relight_gt = torch.stack(img_relight_gt).to(device)
img_relight_gt = [img_relight_gt]
# apply alpha
for i in range(num_view):
outputs_final[i] = outputs_final[i] * alpha_map
img_gt[i] = img_gt[i] * alpha_map
if has_lighting_relight:
outputs_final_relight[i] = outputs_final_relight[i] * alpha_map
if has_view_relight:
img_relight_gt[i] = img_relight_gt[i] * alpha_map
# tensorboard figure logs of validation data
if batch_id == val_log_batch_id:
output_final_vs_gt = []
for i in range(num_view):
output_final_vs_gt.append(outputs_final[i].clamp(min = 0., max = 1.))
output_final_vs_gt.append(img_gt[i].clamp(min = 0., max = 1.))
output_final_vs_gt.append((outputs_final[i] - img_gt[i]).abs().clamp(min = 0., max = 1.))
output_final_vs_gt = torch.cat(output_final_vs_gt, dim = 0)
writer.add_image("output_final_vs_gt_val",
torchvision.utils.make_grid(output_final_vs_gt,
nrow = batch_size,
range = (0, 1),
scale_each = False,
normalize = False).cpu().detach().numpy(),
iter)
if has_lighting_relight:
relight_final_est_gt = []
for i in range(num_view):
relight_final_est_gt.append(outputs_final_relight[i].clamp(min = 0., max = 1.))
if has_view_relight:
relight_final_est_gt.append(img_relight_gt[i].clamp(min = 0., max = 1.))
relight_final_est_gt.append((outputs_final_relight[i] - img_relight_gt[i]).abs().clamp(min = 0., max = 1.))
relight_final_est_gt = torch.cat(relight_final_est_gt, dim = 0)
writer.add_image("relight_final_est_gt_val",
torchvision.utils.make_grid(relight_final_est_gt,
nrow = batch_size,
range = (0, 1),
scale_each = False,
normalize = False).cpu().detach().numpy(),
iter)
# error metrics
err_metrics_batch_i_final = metric.compute_err_metrics_batch(outputs_final[0] * 255.0, img_gt[0] * 255.0, alpha_map, compute_ssim = True)
if has_view_relight:
err_metrics_batch_i_final_relight = metric.compute_err_metrics_batch(outputs_final_relight[0] * 255.0, img_relight_gt[0] * 255.0, alpha_map, compute_ssim = True)
for i in range(batch_size):
for key in list(err_metrics_val.keys()):
if key in err_metrics_batch_i_final.keys():
err_metrics_val[key].append(err_metrics_batch_i_final[key][i])
if has_view_relight:
err_metrics_val_relight[key].append(err_metrics_batch_i_final_relight[key][i])
# save images
for i in range(batch_size):
cv2.imwrite(os.path.join(val_out_dir, str(iter).zfill(8) + '_' + str(view_idx[i].cpu().detach().numpy()).zfill(5) + '.png'), outputs_final[0][i, :].permute((1, 2, 0)).cpu().detach().numpy()[:, :, ::-1] * 255.)
cv2.imwrite(os.path.join(val_err_dir, str(iter).zfill(8) + '_' + str(view_idx[i].cpu().detach().numpy()).zfill(5) + '.png'), (outputs_final[0] - img_gt[0]).abs().clamp(min = 0., max = 1.)[i, :].permute((1, 2, 0)).cpu().detach().numpy()[:, :, ::-1] * 255.)
if first_val:
cv2.imwrite(os.path.join(val_gt_dir, str(view_idx[i].cpu().detach().numpy()).zfill(5) + '.png'), img_gt[0][i, :].permute((1, 2, 0)).cpu().detach().numpy()[:, :, ::-1] * 255.)
cv2.imwrite(os.path.join(val_relight_out_dir, str(iter).zfill(8) + '_' + str(view_idx[i].cpu().detach().numpy()).zfill(5) + '.png'), outputs_final_relight[0][i, :].permute((1, 2, 0)).cpu().detach().numpy()[:, :, ::-1] * 255.)
if has_view_relight:
cv2.imwrite(os.path.join(val_relight_err_dir, str(iter).zfill(8) + '_' + str(view_idx[i].cpu().detach().numpy()).zfill(5) + '.png'), (outputs_final_relight[0] - img_relight_gt[0]).abs().clamp(min = 0., max = 1.)[i, :].permute((1, 2, 0)).cpu().detach().numpy()[:, :, ::-1] * 255.)
if first_val:
cv2.imwrite(os.path.join(val_relight_gt_dir, str(view_idx[i].cpu().detach().numpy()).zfill(5) + '.png'), img_relight_gt[0][i, :].permute((1, 2, 0)).cpu().detach().numpy()[:, :, ::-1] * 255.)
end_val_i = time.time()
print("Val batch %03d mae_valid %0.4f psnr_valid %0.4f ssim_valid %0.4f t_total %0.4f" % (batch_id, err_metrics_batch_i_final['mae_valid_mean'], err_metrics_batch_i_final['psnr_valid_mean'], err_metrics_batch_i_final['ssim_valid_mean'], end_val_i - start_val_i))
batch_id += 1
for key in list(err_metrics_val.keys()):
if err_metrics_val[key]:
err_metrics_val[key] = np.vstack(err_metrics_val[key])
import numpy as np
a = np.array([[0, 1, 1 / 2, 0, 1 / 4, 1 / 2, 0],
[1 / 5, 0, 1 / 2, 1 / 3, 0, 0, 0],
[1 / 5, 0, 0, 1 / 3, 1 / 4, 0, 0],
[1 / 5, 0, 0, 0, 1 / 4, 0, 0],
[1 / 5, 0, 0, 1 / 3, 0, 1 / 2, 1],
[0, 0, 0, 0, 1 / 4, 0, 0],
[1 / 5, 0, 0, 0, 0, 0, 0]], dtype=float)
pr = np.array([[1 / 2],
[1 / 3],
[1 / 4],
[0],
[0],
[0],
[0]], dtype=float)
# print Pagerank matrix
def show_matrix(matrix, pr):
print()
print('Matrix:')
n = len(pr)
for i in range(n):
for j in range(n):
print(matrix[i][j], ' ', end=' ')
print()
print()
print('Pr:')
for i in range(n):
print(pr[i][0], ' ', end=' ')
print('\nSize:', len(pr))
def normalization(a):
sumCol = np.sum(a, axis=0)
for i in range(a.shape[0]):
if sumCol[i] == 0:
print('col: %d, sum: %.5f' % (i, sumCol[i]))
continue
for j in range(a.shape[1]):
a[j][i] = a[j][i] / sumCol[i]
return a
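# Illustrative helper (not part of the original script): once the link matrix has been
# column-normalized, the PageRank vector is usually obtained by damped power iteration.
# The damping factor 0.85 below is the conventional choice, not a value from this file.
def pagerank_iterate(matrix, pr, d=0.85, iters=100):
    n = matrix.shape[0]
    for _ in range(iters):
        pr = d * np.dot(matrix, pr) + (1 - d) / n * np.ones((n, 1))
    return pr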
def firstPr(c):
pr = np.zeros((c.shape[0], 1), dtype=float)
# sum = np.sum(c, axis=0)[0]
# print(sum)
for i in range(c.shape[0]):
pr[i] = c[i][0] / c.shape[0]
# print pr,"\n==================================================="
return pr
'''
Calculate the PageRank weight of anomaly_list or normal_list
:arg
:return
operation weight:
weight[operation][0]: operation
weight[operation][1]: weight
'''
def trace_pagerank(operation_operation, operation_trace, trace_operation, pr_trace, anomaly):
operation_length = len(operation_operation)
trace_length = len(operation_trace)
p_ss = np.zeros((operation_length, operation_length), dtype=np.float32)
p_sr = np.zeros((operation_length, trace_length), dtype=np.float32)
p_rs = np.zeros((trace_length, operation_length), dtype=np.float32)
# matrix = np.zeros((n, n), dtype=np.float32)
pr = np.zeros((trace_length, 1), dtype=np.float32)
import numpy as np
from PIL import Image
import os
from keras.preprocessing import image
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator as IDG
from sklearn.preprocessing import LabelBinarizer as LB
def extract_data_from_lst(lst,input_shape,crop_shape=None, preprocess=True, flip=None):
x = []
for file in lst:
im = read_input_img(file,input_shape,crop_shape,flip)
x += [np.asarray(im,dtype='float32')]
x = np.array(x)
if preprocess:
x = img_preprocess(x)
return x
def extract_feature(model,lst):
features = []
for file in lst:
im = read_input_img(file,model.input_shape[1:])
feature = model.predict(np.asarray(im,dtype='float32').reshape((-1,) + model.input_shape[1:]))
features += [np.squeeze(feature)]
return np.array(features)
def crop_image(im,crop_shape):
assert im.width > crop_shape[0] and im.height > crop_shape[1], 'error crop size'
a = np.random.randint(im.width-crop_shape[0]+1)
b = np.random.randint(im.height-crop_shape[1]+1)
c = a + crop_shape[0]
d = b + crop_shape[1]
return im.crop((a,b,c,d))
def random_flip(im,seed=None):
a = np.random.rand()
if a > 0.5:
im = im.transpose(Image.FLIP_LEFT_RIGHT)
return im
def generate_train_lst(dire):
x = []
y = []
cam = []
#files = os.listdir(dire)
files = sorted(os.listdir(dire))
for file in files:
if file.endswith('.jpg'):
x += [dire + file]
if file.startswith('-'):
y += [-1]
cam += [int(file[4])]
else:
y += [int(file[:4])]
cam += [int(file[6])]
return np.array(x),np.array(y),np.array(cam)
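# Example of the filename parsing above (assumes Market-1501-style names, shown for
# illustration only):
#   '0002_c1s1_000451_03.jpg' -> label 2,  camera 1   (file[:4] = '0002', file[6] = '1')
#   '-1_c3s1_000551_01.jpg'   -> label -1, camera 3   (file[4]  = '3')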
def read_input_img(file,shape,crop_shape=None,flip=None):
im = Image.open(file)
if flip is not None:
im = random_flip(im)
im = im.resize((shape[0],shape[1]))
if crop_shape is None:
return im
im = crop_image(im,crop_shape=crop_shape)
return im
def gen_pairs(s,bid,y, kmap, label_set, batch_size, pos_ratio, neg_ratio):
#id_left = np.random.randint(0,len(y),batch_size).tolist()
id_left = s[bid*batch_size:(bid+1)*batch_size]
num_clss = len(label_set)
id_right = []
y_diff,y_cls1,y_cls2 = [],[],[]
threshold = pos_ratio/(pos_ratio + neg_ratio)
for idx in id_left:
v = np.random.rand()
ln = kmap[y[idx]]
y_cls1 += [to_categorical(ln,num_clss).squeeze()]
if v > threshold:
ln = (np.random.randint(1,num_clss) + ln) % num_clss
y_diff += [[0,1]]
else:
y_diff += [[1,0]]
rn = np.random.randint(len(label_set[ln]))
id_right += [label_set[ln][rn]]
y_cls2 += [to_categorical(ln,num_clss).squeeze()]
return id_left, id_right, y_diff
def image_base_generator(data,batch_size,input_shape,crop_shape=None):
lst,y = data['lst'],data['label']
num_ins = len(y)
clss = np.unique(y)
num_clss = clss.shape[0]
num_batchs = num_ins // batch_size
kmap = {v:k for k,v in enumerate(clss)}
s = np.arange(num_ins)
img_cache = {}
while True:
s = np.random.permutation(s)
for batch in range(num_batchs):
indices = s[batch*batch_size:(batch+1)*batch_size]
X = process_images(lst[indices],img_cache,input_shape,crop_shape)
label = np.array([to_categorical(kmap[y[i]],num_clss).squeeze() for i in indices])
yield X,label
def image_quintuple_generator(lst_files,input_shape,batch_size,crop_shape=None):
pos_ratio, neg_ratio = 1,1
pos_limit, neg_limit = 1,4
pos_factor, neg_factor = 1,1.01
img_cache = {}
f = np.load(lst_files)
lst,y = f['lst'],f['label']
num_ins = len(y)
num_batches = num_ins // batch_size + 1
clss = np.unique(y)
num_clss = clss.shape[0]
kmap = { v:k for k,v in enumerate(clss)}
label_set = [np.where(y == c)[0] for c in clss]
s = np.arange(num_ins)
while True:
s=np.random.permutation(s)
#loop per epoch
for bid in range(num_batches):
id_left, id_right, y_diff = gen_pairs(s,bid,y,kmap,label_set, batch_size,pos_ratio, neg_ratio)
Xleft = process_images([lst[i] for i in id_left],img_cache,input_shape,crop_shape)
Xright = process_images([lst[i] for i in id_right],img_cache,input_shape,crop_shape)
Y_diff = np.array(y_diff)
Y_cls1 = np.array([to_categorical(kmap[y[i]],num_clss).squeeze() for i in id_left])
Y_cls2 = np.array([to_categorical(kmap[y[i]],num_clss).squeeze() for i in id_right])
yield [Xleft, Xright], [Y_diff, Y_cls1, Y_cls2]
pos_ratio = min(pos_ratio * pos_factor, pos_limit)
neg_ratio = min(neg_ratio * neg_factor, neg_limit)
def cache_read(img_name, img_cache,input_shape,crop_shape):
if img_name not in img_cache:
img = read_input_img(img_name,input_shape,crop_shape)
img_cache[img_name] = img
return img_cache[img_name]
def process_images(img_names, img_cache,input_shape,crop_shape):
if crop_shape is None:
X = np.zeros((len(img_names), input_shape[0], input_shape[1], 3))
else:
X = np.zeros((len(img_names), crop_shape[0], crop_shape[1], 3))
for idx, img_name in enumerate(img_names):
im = cache_read(img_name, img_cache,input_shape,crop_shape)
X[idx] = np.asarray(im,dtype='float32')
X[:,:,:,2] -= 97.8286
X[:,:,:,1] -= 99.0468
X[:,:,:,0] -= 105.606
return X
def img_preprocess(imgs, shift = (97.8286,99.046,105.606)):
imgs[:,:,:,0] -= shift[2]
imgs[:,:,:,1] -= shift[1]
imgs[:,:,:,2] -= shift[0]
#imgs[:,:,:,0] /= 255
#imgs[:,:,:,1] /= 255
#imgs[:,:,:,2] /= 255
return imgs
def create_pairs(x,y,neg_times=1):
'''Positive and negative pair creation.
Alternates between positive and negative pairs.
'''
neg_size = neg_times
clss = np.unique(y)
num_clss = len(clss)
digit_indices = [np.where(y == c)[0] for c in clss]
pairs = []
label_diff = []
label_clss = []
for d in range(num_clss):
n = len(digit_indices[d])
for i in range(n):
inc = np.random.randint(1,n)
dn = (i + inc) % n
z1, z2 = digit_indices[d][i], digit_indices[d][dn]
l1 = to_categorical(d, num_clss).squeeze()
pairs += [[x[z1], x[z2]]]
label_diff += [[1,0]]
label_clss += [[l1, l1]]
incs = np.random.randint(1, num_clss, neg_size)
#!/bin/env python3
import os, sys
# sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
import numpy as np
from ..measure.intersection import intersection
def testme():
pass
#test cases
test = True
a, b = 1, 2
phi = np.linspace(3, 10, 100)
x1 = a*phi - b*np.sin(phi)
y1 = a - b*np.cos(phi)
x2=phi
y2=np.sin(phi)+2
x,y=intersection(x1,y1,x2,y2)
assert np.array(x).size > 0 and np.array(y).size > 0  # original assertion was truncated; check that intersections were found
import matplotlib.pyplot as plt
import numpy as np
import scipy.sparse.linalg
import time
import datetime
import glob
import os
import platform
import random
from scipy.stats import norm
from scipy.optimize import fsolve
import scipy.stats as st
from nngeometry.layercollection import LayerCollection
from nngeometry.generator import Jacobian
from nngeometry.object import FMatDense
import torch
from torch import autograd
from torch.utils.data import DataLoader, TensorDataset
np.set_printoptions(precision=2)
EPS_BUFFER = 1e-12
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# def Get_all_relu_layer_output_as_vector(full_model, data_test):
# layer_output_dict = {}
#
# def Get_layer_output(layer_name):
# def hook(model, input, output):
# layer_output_dict[layer_name] = output.detach()
#
# return hook
#
# relu_layer_idx_list = [int(name) for name, layer in full_model.main.named_modules() if isinstance(layer, torch.nn.ReLU)]
# for relu_layer_idx in relu_layer_idx_list:
# full_model.main[relu_layer_idx].register_forward_hook(Get_layer_output(f"main_{relu_layer_idx}"))
#
# output_test = full_model(data_test)
# relu_output_vec = torch.cat([layer_output_dict[relu_layer].view(-1, 1) for relu_layer in layer_output_dict], dim=0)
# return relu_output_vec
def Get_NTK_using_nngeometry(model, out_dim, test_data, centering=True):
# model = G
# out_dim = args.x_dim
# test_data = z_test_torch_no_grad
# centering = True
batch_size = test_data.shape[0]
dataset_test = TensorDataset(test_data, torch.ones(batch_size).to(device))
dataLoader_test = DataLoader(dataset_test, shuffle=False, batch_size=batch_size)
jacobian_generator = Jacobian(layer_collection=LayerCollection.from_model(model),
model=model,
n_output=out_dim,
centering=centering)
ntk_MatDense = FMatDense(jacobian_generator, examples=dataLoader_test)
ntk_torch_tensor = ntk_MatDense.get_dense_tensor()
ntk_torch_mat = ntk_torch_tensor.reshape(ntk_torch_tensor.shape[0] * ntk_torch_tensor.shape[1], -1)
return ntk_torch_mat
def Effective_rank_torch(kernel_mat_torch, eps=1e-12, top_k=None, sparse_eigs=True):
# kernel_mat_torch = ntk_centered
# sparse_eigs = True
if sparse_eigs:
if top_k is None:
top_k = np.min([100, kernel_mat_torch.shape[0]])
kernel_mat_eigvals, _ = scipy.sparse.linalg.eigs(kernel_mat_torch.detach().cpu().numpy(), top_k)
kernel_mat_torch_eigvals_modulus = np.absolute(kernel_mat_eigvals)
else:
kernel_mat_torch_eigvals, _ = torch.eig(kernel_mat_torch)
kernel_mat_torch_eigvals_modulus = np.linalg.norm(kernel_mat_torch_eigvals.detach().cpu().numpy(), axis=1, ord=2)
kernel_mat_torch_eigvals_modulus_normalized = kernel_mat_torch_eigvals_modulus / np.sum(kernel_mat_torch_eigvals_modulus)
kernel_mat_torch_eigvals_modulus_normalized_entropy = -np.sum(kernel_mat_torch_eigvals_modulus_normalized * np.log(kernel_mat_torch_eigvals_modulus_normalized + eps))
kernel_mat_effective_rank = np.exp(kernel_mat_torch_eigvals_modulus_normalized_entropy)
return kernel_mat_effective_rank
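# Worked check of the formula above (independent of the Torch/SciPy helpers): for a flat
# spectrum of n equal eigenvalues the entropy of the normalized spectrum is log(n), so
# exp(entropy) = n, i.e. the effective rank recovers the full rank.
#   eigs = np.ones(10) / 10.0
#   entropy = -np.sum(eigs * np.log(eigs))
#   np.exp(entropy)  # -> 10.0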
import os, time, sys, io
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tight_layout as tlt
from iminuit import Minuit, describe
from datetime import datetime
from obspy.signal.detrend import polynomial
import bead_util as bu
import peakdetect as pdet
import dill as pickle
import scipy.optimize as opti
import scipy.signal as signal
import scipy.constants as constants
from tqdm import tqdm
from joblib import Parallel, delayed
n_core = 20
plt.rcParams.update({'font.size': 14})
date = '20200322'
date = '20200924'
# fig_base = '/home/cblakemore/plots/20190626/'
savefig = True
fig_base = '/home/cblakemore/plots/{:s}/spinning/'.format(date)
#fig_base = '/home/cblakemore/plots/spinsim/'
suffix = ''
# suffix = '_less-yrange'
#suffix = '_3_5e-6mbar_110kHz_real-noise'
#dirname = '/data/old_trap_processed/spinning/ringdown/20190626/'
dirname = '/data/old_trap_processed/spinning/ringdown/{:s}/'.format(date)
#dirname = '/data/old_trap_processed/spinning/ringdown_manual/{:s}/'.format(date)
paths, lengths = bu.find_all_fnames(dirname, ext='.p')
newpaths = paths
# # for 20190626:
# newpaths = [paths[1], paths[2]]
# labels = ['Initial', 'Later']
# mbead = 85.0e-15 # convert picograms to kg
# mbead_err = 1.6e-15
priors = False
manual_priors = False
fix_fterm = False
fit_end_time = 3000.0
exp_fit_end_time = 3000.0
two_point_end_time = 3000.0
tau_ylim = (1100, 1400)
# tau_ylim = (1850,2050)
both_two_point = False
err_adjust = 5.0
# newpaths = [#dirname + '100kHz_start_4_all.p', \
# #dirname + '100kHz_start_5_all.p', \
# #dirname + '100kHz_start_6_all.p', \
# #dirname + '100kHz_start_7_all.p', \
# #dirname + '100kHz_start_8_all.p', \
# #dirname + '100kHz_start_9_all.p', \
# #dirname + '100kHz_start_10_all.p', \
# #dirname + '100kHz_start_11_all.p', \
# #dirname + '100kHz_start_12_all.p', \
# #dirname + '100kHz_start_13_all.p', \
# #dirname + '100kHz_start_14_all.p', \
# #dirname + '50kHz_start_1_all.p', \
# #dirname + '50kHz_start_2_all.p', \
# #dirname + '50kHz_start_3_all.p', \
# #dirname + '110kHz_start_1_all.p', \
# #dirname + '110kHz_start_2_all.p', \
# #dirname + '110kHz_start_3_all.p', \
# #dirname + '110kHz_start_4_all.p', \
# #dirname + '110kHz_start_5_all.p', \
# #dirname + '110kHz_start_6_all.p', \
# dirname + '110kHz_start_2_coarse_all.p', \
# dirname + '110kHz_start_3_coarse_all.p', \
# dirname + '110kHz_start_5_coarse_all.p', \
# dirname + '110kHz_start_6_coarse_all.p', \
# ]
newpaths = [\
# os.path.join(dirname, '110kHz_start_1_all.p'), \
os.path.join(dirname, '110kHz_start_2_all.p'), \
os.path.join(dirname, '110kHz_start_3_all.p'), \
]
sim_data = False
sim_path = '/data/old_trap_processed/spinsim_data/spindowns_processed/sim_110kHz_real-noise/'
sim_fig_base = '/home/cblakemore/plots/spinsim/'
sim_suffix = '_3_5e-6mbar_110kHz_real-noise'
paths, lengths = bu.find_all_fnames(sim_path, ext='.p')
sim_prior_data = [0.0, 1]
if sim_data:
newpaths = paths[:50]
labels = []
for pathind, path in enumerate(newpaths):
labels.append('Meas. {:d}'.format(pathind))
def gauss(x, A, mu, sigma, c):
return A * np.exp( -1.0 * (x - mu)**2 / (2.8 * sigma**2)) + c
def ngauss(x, A, mu, sigma, c, n=2):
return A * np.exp(-1.0*np.abs(x-mu)**n / (2.0*sigma**n)) + c
def fit_fun(x, A, mu, sigma):
return ngauss(x, A, mu, sigma, 0, n=5)
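# Note: ngauss(..., n=2) reduces to the standard Gaussian profile; fit_fun uses n=5 for a
# flatter-topped peak when fitting the terminal-velocity histogram below.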
#if manual_priors:
# fterm_dirname = '/data/old_trap_processed/spinning/ringdown/20191017/'
fterm_dirname = '/data/old_trap_processed/spinning/ringdown/20200322/'
fterm_paths = [fterm_dirname + 'term_velocity_check_1.npy', \
fterm_dirname + 'term_velocity_check_2.npy', \
#fterm_dirname + 'term_velocity_check_3.npy', \
# fterm_dirname + 'term_velocity_check_4.npy', \
# fterm_dirname + 'term_velocity_check_5.npy', \
# fterm_dirname + 'term_velocity_check_6.npy', \
# fterm_dirname + 'term_velocity_check_7.npy', \
]
all_fterm = []
for pathind, path in enumerate(fterm_paths):
data = np.load(open(path, 'rb'))
#plt.plot(data[1])
#plt.show()
all_fterm += list(data[1])
all_fterm = np.array(all_fterm)
fig_term, ax_term = plt.subplots(1,1,dpi=200)
vals, bin_edge, _ = ax_term.hist(all_fterm, density=True)
bins = bin_edge[:-1] + 0.5*(bin_edge[1] - bin_edge[0])
prior_popt, prior_pcov = opti.curve_fit(fit_fun, bins, vals, maxfev=10000,\
p0=[1, np.mean(all_fterm), np.std(all_fterm)])
plot_x = np.linspace(np.mean(all_fterm)-np.std(all_fterm), \
np.mean(all_fterm)+np.std(all_fterm), 100)
plot_x_2 = np.linspace(np.mean(all_fterm) - 3.0*np.std(all_fterm), \
np.mean(all_fterm) + 3.0*np.std(all_fterm), 100)
ax_term.plot(plot_x, 0.5*np.max(vals)*np.ones_like(plot_x))  # half-maximum reference line over +/- 1 sigma; remainder of the original call was truncated
#
# OtterTune - async_tasks.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
import random
import queue
import numpy as np
from celery.task import task, Task
from celery.utils.log import get_task_logger
from djcelery.models import TaskMeta
from sklearn.preprocessing import StandardScaler
from analysis.gp import GPRNP
from analysis.gp_tf import GPRGD
from analysis.preprocessing import Bin, DummyEncoder
from analysis.constraints import ParamConstraintHelper
from website.models import PipelineData, PipelineRun, Result, Workload, KnobCatalog, MetricCatalog
from website.parser import Parser
from website.types import PipelineTaskType
from website.utils import DataUtil, JSONUtil
from website.settings import IMPORTANT_KNOB_NUMBER, NUM_SAMPLES, TOP_NUM_CONFIG # pylint: disable=no-name-in-module
from website.settings import (DEFAULT_LENGTH_SCALE, DEFAULT_MAGNITUDE,
MAX_TRAIN_SIZE, BATCH_SIZE, NUM_THREADS,
DEFAULT_RIDGE, DEFAULT_LEARNING_RATE,
DEFAULT_EPSILON, MAX_ITER, GPR_EPS,
DEFAULT_SIGMA_MULTIPLIER, DEFAULT_MU_MULTIPLIER)
from website.settings import INIT_FLIP_PROB, FLIP_PROB_DECAY
from website.types import VarType
LOG = get_task_logger(__name__)
class UpdateTask(Task): # pylint: disable=abstract-method
def __init__(self):
self.rate_limit = '50/m'
self.max_retries = 3
self.default_retry_delay = 60
class AggregateTargetResults(UpdateTask): # pylint: disable=abstract-method
def on_success(self, retval, task_id, args, kwargs):
super(AggregateTargetResults, self).on_success(retval, task_id, args, kwargs)
# Completely delete this result because it's huge and not
# interesting
task_meta = TaskMeta.objects.get(task_id=task_id)
task_meta.result = None
task_meta.save()
class MapWorkload(UpdateTask): # pylint: disable=abstract-method
def on_success(self, retval, task_id, args, kwargs):
super(MapWorkload, self).on_success(retval, task_id, args, kwargs)
# Replace result with formatted result
if not args[0]['bad']:
new_res = {
'scores': sorted(args[0]['scores'].items()),
'mapped_workload_id': args[0]['mapped_workload'],
}
task_meta = TaskMeta.objects.get(task_id=task_id)
task_meta.result = new_res # Only store scores
task_meta.save()
else:
task_meta = TaskMeta.objects.get(task_id=task_id)
task_meta.result = None
task_meta.save()
class ConfigurationRecommendation(UpdateTask): # pylint: disable=abstract-method
def on_success(self, retval, task_id, args, kwargs):
super(ConfigurationRecommendation, self).on_success(retval, task_id, args, kwargs)
result_id = args[0]['newest_result_id']
result = Result.objects.get(pk=result_id)
# Replace result with formatted result
formatted_params = Parser.format_dbms_knobs(result.dbms.pk, retval['recommendation'])
task_meta = TaskMeta.objects.get(task_id=task_id)
retval['recommendation'] = formatted_params
task_meta.result = retval
task_meta.save()
# Create next configuration to try
config = Parser.create_knob_configuration(result.dbms.pk, retval['recommendation'])
retval['recommendation'] = config
result.next_configuration = JSONUtil.dumps(retval)
result.save()
@task(base=AggregateTargetResults, name='aggregate_target_results')
def aggregate_target_results(result_id):
# Check that we've completed the background tasks at least once. We need
# this data in order to make a configuration recommendation (until we
# implement a sampling technique to generate new training data).
latest_pipeline_run = PipelineRun.objects.get_latest()
newest_result = Result.objects.get(pk=result_id)
if latest_pipeline_run is None or newest_result.session.tuning_session == 'randomly_generate':
result = Result.objects.filter(pk=result_id)
knobs_ = KnobCatalog.objects.filter(dbms=result[0].dbms, tunable=True)
knobs_catalog = {k.name: k for k in knobs_}
knobs = {k: v for k, v in
list(knobs_catalog.items())}
# generate a config randomly
random_knob_result = gen_random_data(knobs)
agg_data = DataUtil.aggregate_data(result)
agg_data['newest_result_id'] = result_id
agg_data['bad'] = True
agg_data['config_recommend'] = random_knob_result
return agg_data
# Aggregate all knob config results tried by the target so far in this
# tuning session and this tuning workload.
target_results = Result.objects.filter(session=newest_result.session,
dbms=newest_result.dbms,
workload=newest_result.workload)
if len(target_results) == 0:
raise Exception('Cannot find any results for session_id={}, dbms_id={}'
.format(newest_result.session, newest_result.dbms))
agg_data = DataUtil.aggregate_data(target_results)
agg_data['newest_result_id'] = result_id
agg_data['bad'] = False
return agg_data
def gen_random_data(knobs):
random_knob_result = {}
for name, metadata in list(knobs.items()):
if metadata.vartype == VarType.BOOL:
flag = random.randint(0, 1)
if flag == 0:
random_knob_result[name] = False
else:
random_knob_result[name] = True
elif metadata.vartype == VarType.ENUM:
enumvals = metadata.enumvals.split(',')
enumvals_len = len(enumvals)
rand_idx = random.randint(0, enumvals_len - 1)
random_knob_result[name] = rand_idx
elif metadata.vartype == VarType.INTEGER:
random_knob_result[name] = random.randint(int(metadata.minval), int(metadata.maxval))
elif metadata.vartype == VarType.REAL:
random_knob_result[name] = random.uniform(
float(metadata.minval), float(metadata.maxval))
elif metadata.vartype == VarType.STRING:
random_knob_result[name] = "None"
elif metadata.vartype == VarType.TIMESTAMP:
random_knob_result[name] = "None"
else:
raise Exception(
'Unknown variable type: {}'.format(metadata.vartype))
return random_knob_result
@task(base=ConfigurationRecommendation, name='configuration_recommendation')
def configuration_recommendation(target_data):
LOG.info('configuration_recommendation called')
latest_pipeline_run = PipelineRun.objects.get_latest()
if target_data['bad'] is True:
target_data_res = {}
target_data_res['status'] = 'bad'
target_data_res['info'] = 'WARNING: no training data, the config is generated randomly'
target_data_res['recommendation'] = target_data['config_recommend']
return target_data_res
# Load mapped workload data
mapped_workload_id = target_data['mapped_workload'][0]
mapped_workload = Workload.objects.get(pk=mapped_workload_id)
workload_knob_data = PipelineData.objects.get(
pipeline_run=latest_pipeline_run,
workload=mapped_workload,
task_type=PipelineTaskType.KNOB_DATA)
workload_knob_data = JSONUtil.loads(workload_knob_data.data)
workload_metric_data = PipelineData.objects.get(
pipeline_run=latest_pipeline_run,
workload=mapped_workload,
task_type=PipelineTaskType.METRIC_DATA)
workload_metric_data = JSONUtil.loads(workload_metric_data.data)
X_workload = np.array(workload_knob_data['data'])
X_columnlabels = np.array(workload_knob_data['columnlabels'])
y_workload = np.array(workload_metric_data['data'])
y_columnlabels = np.array(workload_metric_data['columnlabels'])
rowlabels_workload = np.array(workload_metric_data['rowlabels'])
# Target workload data
newest_result = Result.objects.get(pk=target_data['newest_result_id'])
X_target = target_data['X_matrix']
y_target = target_data['y_matrix']
rowlabels_target = np.array(target_data['rowlabels'])
if not np.array_equal(X_columnlabels, target_data['X_columnlabels']):
raise Exception(('The workload and target data should have '
'identical X columnlabels (sorted knob names)'))
if not np.array_equal(y_columnlabels, target_data['y_columnlabels']):
raise Exception(('The workload and target data should have '
'identical y columnlabels (sorted metric names)'))
# Filter Xs by top 10 ranked knobs
ranked_knobs = PipelineData.objects.get(
pipeline_run=latest_pipeline_run,
workload=mapped_workload,
task_type=PipelineTaskType.RANKED_KNOBS)
ranked_knobs = JSONUtil.loads(ranked_knobs.data)[:IMPORTANT_KNOB_NUMBER]
ranked_knob_idxs = [i for i, cl in enumerate(X_columnlabels) if cl in ranked_knobs]
X_workload = X_workload[:, ranked_knob_idxs]
X_target = X_target[:, ranked_knob_idxs]
X_columnlabels = X_columnlabels[ranked_knob_idxs]
# Filter ys by current target objective metric
target_objective = newest_result.session.target_objective
target_obj_idx = [i for i, cl in enumerate(y_columnlabels) if cl == target_objective]
if len(target_obj_idx) == 0:
raise Exception(('Could not find target objective in metrics '
'(target_obj={})').format(target_objective))
elif len(target_obj_idx) > 1:
raise Exception(('Found {} instances of target objective in '
'metrics (target_obj={})').format(len(target_obj_idx),
target_objective))
metric_meta = MetricCatalog.objects.get_metric_meta(newest_result.session.dbms,
newest_result.session.target_objective)
if metric_meta[target_objective].improvement == '(less is better)':
lessisbetter = True
else:
lessisbetter = False
y_workload = y_workload[:, target_obj_idx]
y_target = y_target[:, target_obj_idx]
y_columnlabels = y_columnlabels[target_obj_idx]
# Combine duplicate rows in the target/workload data (separately)
X_workload, y_workload, rowlabels_workload = DataUtil.combine_duplicate_rows(
X_workload, y_workload, rowlabels_workload)
X_target, y_target, rowlabels_target = DataUtil.combine_duplicate_rows(
X_target, y_target, rowlabels_target)
# Delete any rows that appear in both the workload data and the target
# data from the workload data
dups_filter = np.ones(X_workload.shape[0], dtype=bool)
target_row_tups = [tuple(row) for row in X_target]
for i, row in enumerate(X_workload):
if tuple(row) in target_row_tups:
dups_filter[i] = False
X_workload = X_workload[dups_filter, :]
y_workload = y_workload[dups_filter, :]
rowlabels_workload = rowlabels_workload[dups_filter]
# Combine target & workload Xs for preprocessing
X_matrix = np.vstack([X_target, X_workload])
# Dummy encode categorial variables
categorical_info = DataUtil.dummy_encoder_helper(X_columnlabels,
mapped_workload.dbms)
dummy_encoder = DummyEncoder(categorical_info['n_values'],
categorical_info['categorical_features'],
categorical_info['cat_columnlabels'],
categorical_info['noncat_columnlabels'])
X_matrix = dummy_encoder.fit_transform(X_matrix)
# below two variables are needed for correctly determing max/min on dummies
binary_index_set = set(categorical_info['binary_vars'])
total_dummies = dummy_encoder.total_dummies()
# Scale to N(0, 1)
X_scaler = StandardScaler()
X_scaled = X_scaler.fit_transform(X_matrix)
if y_target.shape[0] < 5: # FIXME
# FIXME (dva): if there are fewer than 5 target results so far
# then scale the y values (metrics) using the workload's
# y_scaler. I'm not sure if 5 is the right cutoff.
y_target_scaler = None
y_workload_scaler = StandardScaler()
y_matrix = np.vstack([y_target, y_workload])
y_scaled = y_workload_scaler.fit_transform(y_matrix)
else:
# FIXME (dva): otherwise try to compute a separate y_scaler for
# the target and scale them separately.
try:
y_target_scaler = StandardScaler()
y_workload_scaler = StandardScaler()
y_target_scaled = y_target_scaler.fit_transform(y_target)
y_workload_scaled = y_workload_scaler.fit_transform(y_workload)
y_scaled = np.vstack([y_target_scaled, y_workload_scaled])
except ValueError:
y_target_scaler = None
y_workload_scaler = StandardScaler()
y_scaled = y_workload_scaler.fit_transform(y_target)
# Set up constraint helper
constraint_helper = ParamConstraintHelper(scaler=X_scaler,
encoder=dummy_encoder,
binary_vars=categorical_info['binary_vars'],
init_flip_prob=INIT_FLIP_PROB,
flip_prob_decay=FLIP_PROB_DECAY)
# FIXME (dva): check if these are good values for the ridge
# ridge = np.empty(X_scaled.shape[0])
# ridge[:X_target.shape[0]] = 0.01
# ridge[X_target.shape[0]:] = 0.1
# FIXME: we should generate more samples and use a smarter sampling
# technique
num_samples = NUM_SAMPLES
X_samples = np.empty((num_samples, X_scaled.shape[1]))
X_min = np.empty(X_scaled.shape[1])
X_max = np.empty(X_scaled.shape[1])
knobs_mem = KnobCatalog.objects.filter(
dbms=newest_result.session.dbms, tunable=True, resource=1)
knobs_mem_catalog = {k.name: k for k in knobs_mem}
mem_max = newest_result.workload.hardware.memory
X_mem = np.zeros([1, X_scaled.shape[1]])
X_default = np.empty(X_scaled.shape[1])
# Get default knob values
for i, k_name in enumerate(X_columnlabels):
k = KnobCatalog.objects.filter(dbms=newest_result.session.dbms, name=k_name)[0]
X_default[i] = k.default
X_default_scaled = X_scaler.transform(X_default.reshape(1, X_default.shape[0]))[0]
# Determine min/max for knob values
for i in range(X_scaled.shape[1]):
if i < total_dummies or i in binary_index_set:
col_min = 0
col_max = 1
else:
col_min = X_scaled[:, i].min()
col_max = X_scaled[:, i].max()
if X_columnlabels[i] in knobs_mem_catalog:
X_mem[0][i] = mem_max * 1024 * 1024 * 1024 # mem_max GB
col_max = min(col_max, X_scaler.transform(X_mem)[0][i])
# Set min value to the default value
# FIXME: support multiple methods can be selected by users
col_min = X_default_scaled[i]
X_min[i] = col_min
X_max[i] = col_max
X_samples[:, i] = np.random.rand(num_samples) * (col_max - col_min) + col_min
# Maximize the throughput, moreisbetter
# Use gradient descent to minimize -throughput
if not lessisbetter:
y_scaled = -y_scaled
q = queue.PriorityQueue()
for x in range(0, y_scaled.shape[0]):
q.put((y_scaled[x][0], x))
i = 0
while i < TOP_NUM_CONFIG:
try:
item = q.get_nowait()
# Tensorflow get broken if we use the training data points as
# starting points for GPRGD. We add a small bias for the
# starting points. GPR_EPS default value is 0.001
# if the starting point is X_max, we minus a small bias to
# make sure it is within the range.
dist = sum(np.square(X_max - X_scaled[item[1]]))
if dist < 0.001:
X_samples = np.vstack((X_samples, X_scaled[item[1]] - abs(GPR_EPS)))
else:
X_samples = np.vstack((X_samples, X_scaled[item[1]] + abs(GPR_EPS)))
i = i + 1
except queue.Empty:
break
model = GPRGD(length_scale=DEFAULT_LENGTH_SCALE,
magnitude=DEFAULT_MAGNITUDE,
max_train_size=MAX_TRAIN_SIZE,
batch_size=BATCH_SIZE,
num_threads=NUM_THREADS,
learning_rate=DEFAULT_LEARNING_RATE,
epsilon=DEFAULT_EPSILON,
max_iter=MAX_ITER,
sigma_multiplier=DEFAULT_SIGMA_MULTIPLIER,
mu_multiplier=DEFAULT_MU_MULTIPLIER)
model.fit(X_scaled, y_scaled, X_min, X_max, ridge=DEFAULT_RIDGE)
res = model.predict(X_samples, constraint_helper=constraint_helper)
best_config_idx = np.argmin(res.minl.ravel())
best_config = res.minl_conf[best_config_idx, :]
best_config = X_scaler.inverse_transform(best_config)
# Decode one-hot encoding into categorical knobs
best_config = dummy_encoder.inverse_transform(best_config)
# Although we have max/min limits in the GPRGD training session, it may
# lose some precisions. e.g. 0.99..99 >= 1.0 may be True on the scaled data,
# when we inversely transform the scaled data, the different becomes much larger
# and cannot be ignored. Here we check the range on the original data
# directly, and make sure the recommended config lies within the range
X_min_inv = X_scaler.inverse_transform(X_min)
X_max_inv = X_scaler.inverse_transform(X_max)
best_config = np.minimum(best_config, X_max_inv)
best_config = np.maximum(best_config, X_min_inv)
conf_map = {k: best_config[i] for i, k in enumerate(X_columnlabels)}
conf_map_res = {}
conf_map_res['status'] = 'good'
conf_map_res['recommendation'] = conf_map
conf_map_res['info'] = 'INFO: training data size is {}'.format(X_scaled.shape[0])
return conf_map_res
def load_data_helper(filtered_pipeline_data, workload, task_type):
pipeline_data = filtered_pipeline_data.get(workload=workload,
task_type=task_type)
LOG.debug("PIPELINE DATA: %s", str(pipeline_data.data))
return JSONUtil.loads(pipeline_data.data)
@task(base=MapWorkload, name='map_workload')
def map_workload(target_data):
# Get the latest version of pipeline data that's been computed so far.
latest_pipeline_run = PipelineRun.objects.get_latest()
if target_data['bad']:
assert target_data is not None
return target_data
assert latest_pipeline_run is not None
newest_result = Result.objects.get(pk=target_data['newest_result_id'])
target_workload = newest_result.workload
X_columnlabels = np.array(target_data['X_columnlabels'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 12 00:17:13 2021
@author: alankar
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.integrate import solve_ivp
import h5py
import sys
from scipy import interpolate
import sys
from decimal import Decimal
def fexp(number):
(sign, digits, exponent) = Decimal(number).as_tuple()
return len(digits) + exponent - 1
def fman(number):
return Decimal(number).scaleb(-fexp(number)).normalize()
Msun = 2e33
yr = 365*24*60**2
mp = 1.6726219e-24
kB = 1.380649e-16
pc = 3.086e18
kpc = 1e3*pc
X = 0.7154
Y = 0.2703
Z = 0.0143
mu = 1./(2*X+0.75*Y+0.5625*Z)
mue = 2./(1+X)
mui = 1./(1/mu-1/mue)
Tfloor = 1.e4
def deriv(x, y):
if y[0]>0:
print ("negative density!",x)
#sys.exit()
d = np.abs(-1./(y[0]*x**q))
if d<=0: d = -d
p = y[1]*d**gamma # tilde density and pressure
T = mu*mp*c0*c0*p/(kB*d*gamma) # temperature in CGS
Lam = Lambda(T)/Lam0 #tilde Lambda
if (T<=Tfloor):
T = Tfloor
Lam = 0. #Lambda(T)*(T/Tfloor)**(-10.)/Lam0
Num1 = q*(c0/v0)**2*( d*Lam*(1+gamma_m/(gamma*beta0))/y[0] + (p+gamma_m*d**gamma_m/(gamma*beta0))/(x*d) )
Den1 = (1 - (c0/v0)**2*(p+gamma_m*d**gamma_m/(gamma*beta0))/(d*y[0]**2))*y[0]
return [ Num1/Den1, -q*gamma*(1+gamma_m/(gamma*beta0))*Lam*d**(2-gamma)/y[0] ]
def Lambda(temp): #returns cooling function in cgs
klo=0; khi=tab_sz-1
while (klo != (khi-1)):
kmid = int((khi+klo)/2)
Tmid = Ttab[kmid]
if (temp<=Tmid):
khi = kmid
if (temp>Tmid):
klo = kmid
dT = Ttab[khi] - Ttab[klo]
scrh = Ltab[klo]*(Ttab[khi]-temp)/dT + Ltab[khi]*(temp-Ttab[klo])/dT; #linear interpolation
return scrh
'''
def Lambda(temp):
lam = 0.0
if (temp<=1.e7 and temp>=1.e4):
lam = 2.7e-23*(temp/1.e7)**-1.0
if (temp<1.e4):
lam = 2.7e-20*(temp/1.e4)**20
#lam = 1.e-100
if (temp>1.e7):
lam = 2.7e-23*(temp/1.e7)**-20
#lam = 1.e-100
return lam
'''
D = np.loadtxt('./cooltable.dat')
global Ttab, Ltab, tab_sz
Ttab = D[:,0]; Ltab = D[:,1]; tab_sz = np.size(Ttab)
global q, gamma, gamma_m, beta0, c0, v0, Lam0, T0, d0, v0byc0
gamma_m = 1.03; beta0 = 1e10
q=2; gamma=5./3.
mu = 0.62; mue = 1.17; mui = 1./(1./mu - 1./mue)
#v0byc0 = 0.099; T0 = 5.1e5; d0 = 1.96e-3*mu*mp ; c0 = np.sqrt(gamma*kB*T0/mu/mp)
#v0byc0 = 0.049; T0 = 3.8e5; d0 = 7.9e-4*mu*mp ; c0 = np.sqrt(gamma*kB*T0/mu/mp)
for logT0 in np.linspace(4,7,1000):
for logn0 in np.linspace(-4,-1,1000):
v0byc0 = 0.05; T0 = 10**logT0; d0 = (10**logn0)*mu*mp ; c0 = np.sqrt(gamma*kB*T0/mu/mp) #5kpc
shift = 1.0
ct0 = c0*np.sqrt(1+gamma_m/(gamma*beta0))
import os.path
import cv2
import numpy as np
class DepthTracker:
def __init__(self, intrinsic_matrix, scale_factor=1.0, coordinate=None):
self.scale_factor = scale_factor
self.fx = intrinsic_matrix[0, 0] * scale_factor
self.fy = intrinsic_matrix[1, 1] * scale_factor
self.cx = intrinsic_matrix[0, 2] * scale_factor
self.cy = intrinsic_matrix[1, 2] * scale_factor
self.coordinate = coordinate
def compute_dist(self, new_coord):
if self.coordinate is None:
self.coordinate = new_coord
return 0
dist = np.linalg.norm(new_coord - self.coordinate)
self.coordinate = new_coord
return dist
def get_coordinates(self, x1, y1, depth):
print(f"depth at center: {depth}")
x = (x1 - self.cx) * depth / self.fx
y = (y1 - self.cy) * depth / self.fy
result = np.array([x, y, depth])
return result
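# Worked example of the back-projection above (illustrative numbers, not from the
# calibration file): with fx = fy = 500 and cx = cy = 320, a pixel at (420, 320) observed
# at depth 2.0 gives x = (420 - 320) * 2.0 / 500 = 0.4 and y = 0, i.e. the point lies
# 0.4 length-units to the right of the optical axis.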
def get_coords_bb(self, img_name):
input_img = cv2.resize(cv2.imread(f"./input/{img_name}.jpg"), (0, 0), fx=self.scale_factor, fy=self.scale_factor)
bounding_box = cv2.selectROI('frame', input_img, fromCenter=False, showCrosshair=True)
center_x, center_y, height_pixels = bounding_box[0], bounding_box[1], bounding_box[3]
return self.get_coordinates(center_x, center_y, 7.38)
if __name__ == "__main__":
intrinsic_matrix = np.loadtxt('../camera_calibration/intrinsics.cfg')
depth_tracker = DepthTracker(intrinsic_matrix, scale_factor=0.25)
prev_coords = None
for image in ["PXL_20210418_183601908", "PXL_20210418_183618429"]:
coords = depth_tracker.get_coords_bb(image) # 6.63
print(f"in image {image} richard is at {coords}")
if prev_coords is not None:
dist = np.linalg.norm(prev_coords - coords)
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from ..utils import remove_difficult
from .base_representation import BaseRepresentation
class Detection(BaseRepresentation):
def __init__(self, identifier='', labels=None, x_mins=None, y_mins=None, x_maxs=None, y_maxs=None, metadata=None):
super().__init__(identifier, metadata)
self.labels = np.array(labels) if labels is not None else np.array([])
self.x_mins = np.array(x_mins) if x_mins is not None else np.array([])
self.y_mins = np.array(y_mins) if y_mins is not None else np.array([])
self.x_maxs = np.array(x_maxs) if x_maxs is not None else np.array([])
self.y_maxs = np.array(y_maxs) if y_maxs is not None else np.array([])
def remove(self, indexes):
self.labels = np.delete(self.labels, indexes)
self.x_mins = np.delete(self.x_mins, indexes)
self.y_mins = np.delete(self.y_mins, indexes)
self.x_maxs = np.delete(self.x_maxs, indexes)
self.y_maxs = np.delete(self.y_maxs, indexes)
difficult_boxes = self.metadata.get('difficult_boxes')
if not difficult_boxes:
return
new_difficult_boxes = remove_difficult(difficult_boxes, indexes)
self.metadata['difficult_boxes'] = new_difficult_boxes
@property
def size(self):
return len(self.x_mins)
@property
def boxes(self):
if self.size == 0:
return []
return [[x_min, y_min, x_max, y_max]
for x_min, y_min, x_max, y_max in zip(self.x_mins, self.y_mins, self.x_maxs, self.y_maxs)]
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
def are_bounding_boxes_equal():
if not np.array_equal(self.labels, other.labels):
return False
if not np.array_equal(self.x_mins, other.x_mins):
return False
if not np.array_equal(self.y_mins, other.y_mins):
return False
if not np.array_equal(self.x_maxs, other.x_maxs):
    return False
if not np.array_equal(self.y_maxs, other.y_maxs):
    return False
return True  # remaining checks reconstructed from the pattern of the preceding comparisons
import os
from copy import deepcopy
import pickle
import argparse
import re
from distutils.util import strtobool
import numpy as np
import psutil
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from transformers import AutoTokenizer, get_constant_schedule_with_warmup
from torch.utils.tensorboard import SummaryWriter
from models.seqtransformer import SeqTransformer
from data.meta_dataset import meta_dataset
from data.utils.tokenizer import manual_tokenizer, specials
from data.utils.data_loader_numpy import StratifiedLoader, StratifiedLoaderwClassesSubset
from data.utils.sampling import dataset_sampler
from utils.metrics import logging_metrics
from utils.timing import Timer
from utils.seed import set_seed
def train(args):
def _get_dataloader(datasubset, tokenizer, device, args, subset_classes=True):
"""
Get specific dataloader.
Args:
datasubset ([type]): [description]
tokenizer ([type]): [description]
device ([type]): [description]
args ([type]): [description]
Returns:
dataloader
"""
if subset_classes:
dataloader = StratifiedLoaderwClassesSubset(datasubset, k=args['k'],
max_classes=args['max_classes'],
max_batch_size=args['max_batch_size'],
tokenizer=tokenizer,
device=device,
shuffle=True,
verbose=False)
else:
dataloader = StratifiedLoader(datasubset, k=args['k'],
max_batch_size=args['max_batch_size'],
tokenizer=tokenizer,
device=device,
shuffle=True,
verbose=False)
return dataloader
def _adapt_and_fit(support_labels, support_input, query_labels, query_input, loss_fn, model_init, args, mode="train"):
"""
Adapts the init model to a support set and computes loss on query set.
Args:
support_labels ([type]): [description]
support_text ([type]): [description]
query_labels ([type]): [description]
query_text ([type]): [description]
model_init ([type]): [description]
args
mode
"""
#####################
# Create model_task #
#####################
if (not args['dropout']) and mode == "train":
for module in model_init.modules():
if isinstance(module, nn.Dropout):
module.eval()
else:
module.train()
elif mode != "train":
model_init.eval()
else:
model_init.train()
model_task = deepcopy(model_init)
model_task_optimizer = optim.SGD(model_task.parameters(),
lr=args['inner_lr'])
model_task.zero_grad()
#######################
# Generate prototypes #
#######################
y = model_init(support_input)
labs = torch.sort(torch.unique(support_labels))[0]
prototypes = torch.stack([torch.mean(y[support_labels == c], dim=0) for c in labs])
W_init = 2 * prototypes
b_init = -torch.norm(prototypes, p=2, dim=1)**2
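# Note: with W = 2c and b = -||c||^2, F.linear(y, W, b) = 2*y.c - ||c||^2, which equals
# -||y - c||^2 + ||y||^2; the shared ||y||^2 term does not affect the ranking, so the
# logits order classes by (negative) squared Euclidean distance to each class prototype.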
W_task, b_task = W_init.detach(), b_init.detach()
W_task.requires_grad, b_task.requires_grad = True, True
#################
# Adapt to data #
#################
for _ in range(args['n_inner']):
y = model_task(support_input)
logits = F.linear(y, W_task, b_task)
inner_loss = loss_fn(logits, support_labels)
W_task_grad, b_task_grad = torch.autograd.grad(inner_loss,\
[W_task, b_task], retain_graph=True)
inner_loss.backward()
if args['clip_val'] > 0:
torch.nn.utils.clip_grad_norm_(model_task.parameters(),
args['clip_val'])
model_task_optimizer.step()
W_task = W_task - args['output_lr'] * W_task_grad
b_task = b_task - args['output_lr'] * b_task_grad
if args['print_inner_loss']:
print(f"\tInner Loss: {inner_loss.detach().cpu().item()}")
#########################
# Validate on query set #
#########################
if mode == "train":
for module in model_task.modules():
if isinstance(module, nn.Dropout):
module.eval()
W_task = W_init + (W_task - W_init).detach()
b_task = b_init + (b_task - b_init).detach()
y = model_task(query_input)
logits = F.linear(y, W_task, b_task)
outer_loss = loss_fn(logits, query_labels)
if mode == "train":
model_task_params = [param for param in model_task.parameters() if param.requires_grad]
model_task_grads = torch.autograd.grad(outer_loss, model_task_params,
retain_graph=True)
model_init_params = [param for param in model_init.parameters() if param.requires_grad]
model_init_grads = torch.autograd.grad(outer_loss, model_init_params,
retain_graph=False)
model_init_grads = model_init_grads + model_task_grads
for param, grad in zip(model_init_params, model_init_grads):
if param.grad != None:
param.grad += grad.detach()
else:
param.grad = grad.detach()
else:
del model_task, W_task, b_task, W_task_grad, b_task_grad, prototypes, W_init, b_init
if outer_loss.detach().cpu().item() > 10:
print(outer_loss.detach().cpu().item(),
inner_loss.detach().cpu().item())
return logits.detach(), outer_loss.detach()
#######################
# Logging Directories #
#######################
log_dir = os.path.join(args['checkpoint_path'], args['version'])
os.makedirs(log_dir, exist_ok=True)
os.makedirs(os.path.join(log_dir, 'tensorboard'), exist_ok=True)
os.makedirs(os.path.join(log_dir, 'checkpoint'), exist_ok=True)
#print(f"Saving models and logs to {log_dir}")
checkpoint_save_path = os.path.join(log_dir, 'checkpoint')
with open(os.path.join(log_dir, 'checkpoint', 'hparams.pickle'), 'wb') as file:
pickle.dump(args, file)
##########################
# Device, Logging, Timer #
##########################
set_seed(args['seed'])
timer = Timer()
device = torch.device('cuda' if (torch.cuda.is_available() and args['gpu']) else 'cpu')
# Build the tensorboard writer
writer = SummaryWriter(os.path.join(log_dir, 'tensorboard'))
###################
# Load in dataset #
###################
print("Data Prep")
dataset = meta_dataset(include=args['include'], verbose=True)
dataset.prep(text_tokenizer=manual_tokenizer)
print("")
####################
# Init models etc. #
####################
model_init = SeqTransformer(args)
tokenizer = AutoTokenizer.from_pretrained(args['encoder_name'])
tokenizer.add_special_tokens({'additional_special_tokens': specials()})
model_init.encoder.model.resize_token_embeddings(len(tokenizer.vocab))
if args['optimizer'] == "Adam":
meta_optimizer = optim.Adam(model_init.parameters(), lr=args['meta_lr'])
elif args['optimizer'] == "SGD":
meta_optimizer = optim.SGD(model_init.parameters(), lr=args['meta_lr'])
meta_scheduler = get_constant_schedule_with_warmup(meta_optimizer, args['warmup_steps'])
reduceOnPlateau = optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, mode='max',
factor=args['lr_reduce_factor'],
patience=args['patience'],
verbose=True)
model_init = model_init.to(device)
loss_fn = nn.CrossEntropyLoss()
#################
# Training loop #
#################
best_overall_acc_s = 0.0
for episode in range(1, args['max_episodes']+1):
outer_loss_agg, acc_agg, f1_agg = 0.0, 0.0, 0.0
outer_loss_s_agg, acc_s_agg, f1_s_agg = 0.0, 0.0, 0.0
for ii in range(1, args['n_outer']+1):
#################
# Sample a task #
#################
task = dataset_sampler(dataset, sampling_method='sqrt')
datasubset = dataset.datasets[task]['train']
dataloader = _get_dataloader(datasubset, tokenizer, device,
args, subset_classes=args['subset_classes'])
support_labels, support_input, query_labels, query_input = next(dataloader)
logits, outer_loss = _adapt_and_fit(support_labels, support_input,
query_labels, query_input,
loss_fn, model_init, args,
mode="train")
######################
# Inner Loop Logging #
######################
with torch.no_grad():
mets = logging_metrics(logits.detach().cpu(), query_labels.detach().cpu())
outer_loss_ = outer_loss.detach().cpu().item()
acc = mets['acc']
f1 = mets['f1']
outer_loss_s = outer_loss_/ np.log(dataloader.n_classes)
acc_s = acc / (1/dataloader.n_classes)
f1_s = f1/(1/dataloader.n_classes)
outer_loss_agg += outer_loss_ / args['n_outer']
acc_agg += acc / args['n_outer']
f1_agg += f1 / args['n_outer']
outer_loss_s_agg += outer_loss_s / args['n_outer']
acc_s_agg += acc_s / args['n_outer']
f1_s_agg += f1_s / args['n_outer']
print("{:} | Train | Episode {:04}.{:02} | Task {:^20s}, N={:} | Loss {:5.2f}, Acc {:5.2f}, F1 {:5.2f} | Mem {:5.2f} GB".format(
timer.dt(), episode, ii, task, dataloader.n_classes,
outer_loss_s if args['print_scaled'] else outer_loss_,
acc_s if args['print_scaled'] else acc,
f1_s if args['print_scaled'] else f1,
psutil.Process(os.getpid()).memory_info().rss / 1024 ** 3))
writer.add_scalars('Loss/Train', {task: outer_loss_}, episode)
writer.add_scalars('Accuracy/Train', {task: acc}, episode)
writer.add_scalars('F1/Train', {task: f1}, episode)
writer.add_scalars('LossScaled/Train', {task: outer_loss_s}, episode)
writer.add_scalars('AccuracyScaled/Train', {task: acc_s}, episode)
writer.add_scalars('F1Scaled/Train', {task: f1_s}, episode)
writer.flush()
############################
# Init Model Backward Pass #
############################
model_init_params = [param for param in model_init.parameters() if param.requires_grad]
#for param in model_init_params:
# param.grad = param.grad #/ args['n_outer']
if args['clip_val'] > 0:
torch.nn.utils.clip_grad_norm_(model_init_params, args['clip_val'])
meta_optimizer.step()
meta_scheduler.step()
if args['warmup_steps'] <= episode + 1:
meta_optimizer.zero_grad()
#####################
# Aggregate Logging #
#####################
print("{:} | MACRO-AGG | Train | Episode {:04} | Loss {:5.2f}, Acc {:5.2f}, F1 {:5.2f}\n".format(
timer.dt(), episode,
outer_loss_s_agg if args['print_scaled'] else outer_loss_agg,
acc_s_agg if args['print_scaled'] else acc_agg,
f1_s_agg if args['print_scaled'] else f1_agg))
writer.add_scalar('Loss/MacroTrain', outer_loss_agg, episode)
writer.add_scalar('Accuracy/MacroTrain', acc_agg, episode)
writer.add_scalar('F1/MacroTrain', f1_agg, episode)
writer.add_scalar('LossScaled/MacroTrain', outer_loss_s_agg, episode)
writer.add_scalar('AccuracyScaled/MacroTrain', acc_s_agg, episode)
writer.add_scalar('F1Scaled/MacroTrain', f1_s_agg, episode)
writer.flush()
##############
# Evaluation #
##############
if (episode % args['eval_every_n']) == 0 or episode==1:
overall_loss, overall_acc, overall_f1 = [], [], []
overall_loss_s, overall_acc_s, overall_f1_s = [], [], []
###################
# Individual Task #
###################
for task in dataset.lens.keys():
datasubset = dataset.datasets[task]['validation']
task_loss, task_acc, task_f1 = [], [], []
task_loss_s, task_acc_s, task_f1_s = [], [], []
for _ in range(args['n_eval_per_task']):
dataloader = _get_dataloader(datasubset, tokenizer, device,
args, subset_classes=args['subset_classes'])
support_labels, support_input, query_labels, query_input = next(dataloader)
logits, loss = _adapt_and_fit(support_labels, support_input,
query_labels, query_input,
loss_fn, model_init, args,
mode="eval")
mets = logging_metrics(logits.detach().cpu(), query_labels.detach().cpu())
task_loss.append(loss.detach().cpu().item())
task_acc.append(mets['acc'])
task_f1.append(mets['f1'])
task_loss_s.append(loss.detach().cpu().item() / np.log(dataloader.n_classes))
import numpy as np
import numpy.random
import matplotlib.pyplot as plt
import acivate_fun as act
from generate_data import get_normal_data, get_simple_data
from numpy import seterr
import init_utils  # Part 1: initialization
seterr(all='raise')
# Constants
# Identifier strings for the activation functions
SIGMOID_NAME = 'sigmoid'
TANH_NAME = 'tanh'
RELU_NAME = 'ReLU'
# Main driver for the deep neural network
# net_array = list of layer configs, e.g. [{'neurons': 3, 'activate': 'tanh'}]
# learning_rate = learning rate, defaults to 0.12
# train_times = number of training iterations, defaults to 3000
# random_seed = seed for the random number generator, defaults to 2021
def deep_neural_network(X, Y
, net_array, learning_rate=0.12
, train_times=3000, random_seed=2021):
# Lists for plotting the training curve
x = []
y = []
# Initialize basic parameters
net_deep = len(net_array)
net_array[0]['neurons'] = X.shape[0]
numpy.random.seed(random_seed)
m = X.shape[1]
W, b = initial_parameters(net_array)
# Store intermediate values for each layer
Z = [np.array([]) for _ in range(net_deep)]  # per-layer storage; the original line was truncated
"""Utility module."""
import numpy as np
import astropy.constants as const
import astropy.units as u
from scipy.interpolate import RectBivariateSpline
from typing import Sequence, Optional, Tuple, Union
import warnings
from .interpolators import Beam
def _get_bl_len_vec(bl_len_ns: Union[float, np.ndarray]) -> np.ndarray:
"""
Convert a baseline length in a variety of formats to a standard length-3 vector.
Parameters
----------
bl_len_ns
The baseline length in nanosec (i.e. 1e9 * metres / c). If scalar, interpreted
as E-W length, if len(2), interpreted as EW and NS length, otherwise the full
[EW, NS, Z] length. Unspecified dimensions are assumed to be zero.
Returns
-------
bl_vec
A length-3 array. The full [EW, NS, Z] baseline vector.
"""
if np.isscalar(bl_len_ns):
return np.array([bl_len_ns, 0, 0])
elif len(bl_len_ns) <= 3:
# make a length-3 array
return np.pad(bl_len_ns, pad_width=3 - len(bl_len_ns), mode="constant")[-3:]
return bl_len_ns
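# Minimal usage sketch (illustrative only, not part of the original module):
#
#     _get_bl_len_vec(30.0)             # -> [30., 0., 0.]   (scalar = E-W length)
#     _get_bl_len_vec([30.0, 40.0])     # -> [30., 40., 0.]  ([EW, NS], Z padded to zero)
#     _get_bl_len_vec([1.0, 2.0, 3.0])  # already length 3, returned unchanged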
def get_bl_len_magnitude(bl_len_ns: Union[float, np.ndarray, Sequence]) -> float:
"""
Get the magnitude of the length of the given baseline.
Parameters
----------
bl_len_ns
The baseline length in nanosec (i.e. 1e9 * metres / c). If scalar, interpreted
as E-W length, if len(2), interpreted as EW and NS length, otherwise the full
[EW, NS, Z] length. Unspecified dimensions are assumed to be zero.
Returns
-------
mag
The magnitude of the baseline length.
"""
bl_len_ns = _get_bl_len_vec(bl_len_ns)
return np.sqrt(np.sum(bl_len_ns ** 2))
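# Hedged, runnable sketch (illustrative; not part of the original module): the
# magnitude is just the Euclidean norm of the padded [EW, NS, Z] vector.
def _example_bl_len_magnitude():
    # a purely horizontal 3-4-5 baseline: sqrt(30**2 + 40**2) == 50
    assert np.isclose(get_bl_len_magnitude([30.0, 40.0]), 50.0)
    # a scalar is interpreted as E-W only, so the magnitude is unchanged
    assert np.isclose(get_bl_len_magnitude(30.0), 30.0)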
def gen_delay_filter(
freqs: np.ndarray,
bl_len_ns: Union[float, np.ndarray, Sequence],
standoff: float = 0.0,
delay_filter_type: Optional[str] = "gauss",
min_delay: Optional[float] = None,
max_delay: Optional[float] = None,
normalize: Optional[float] = None,
) -> np.ndarray:
"""
Generate a delay filter in delay space.
Parameters
----------
freqs
Frequency array [GHz]
bl_len_ns
The baseline length in nanosec (i.e. 1e9 * metres / c). If scalar, interpreted
as E-W length, if len(2), interpreted as EW and NS length, otherwise the full
[EW, NS, Z] length. Unspecified dimensions are assumed to be zero.
standoff
Supra-horizon buffer [nanosec]
delay_filter_type
    Options are ``['gauss', 'trunc_gauss', 'tophat', 'none']``.
    This sets the filter profile. ``gauss`` is a Gaussian whose 1-sigma width
    is the horizon delay (plus standoff) divided by four; ``trunc_gauss`` is
    the same but truncated beyond 1-sigma; ``'none'`` means the filter is
    identically one.
min_delay
Minimum absolute delay of filter
max_delay
Maximum absolute delay of filter
normalize
If set, will normalize the filter such that the power of the output
matches the power of the input times the normalization factor.
If not set, the filter merely has a maximum of unity.
Returns
-------
delay_filter
Delay filter in delay space (1D)
"""
# setup
delays = np.fft.fftfreq(freqs.size, freqs[1] - freqs[0])
if isinstance(bl_len_ns, np.ndarray):
bl_len_ns = np.linalg.norm(bl_len_ns)
# add standoff: four sigma is horizon
one_sigma = (bl_len_ns + standoff) / 4.0
# create filter
if delay_filter_type in [None, "none", "None"]:
delay_filter = np.ones_like(delays)
"""
fitting.py
Created by <NAME> on 2017-05-19.
"""
import os
import glob
import inspect
from collections import OrderedDict
import numpy as np
import astropy.io.fits as pyfits
import astropy.units as u
from astropy.cosmology import Planck15
import astropy.constants as const
from . import utils
#from .model import BeamCutout
from .utils import GRISM_COLORS
# Minimum redshift where IGM is applied
IGM_MINZ = 3.4 # blue edge of G800L
# Default parameters for drizzled line map
PLINE = {'kernel': 'point', 'pixfrac': 0.2, 'pixscale': 0.1, 'size': 8, 'wcs': None}
# IGM from eazy-py
try:
import eazy.igm
IGM = eazy.igm.Inoue14()
except:
IGM = None
def run_all_parallel(id, get_output_data=False, **kwargs):
import numpy as np
from grizli.fitting import run_all
from grizli import multifit
import time
import traceback
t0 = time.time()
print('Run {0}'.format(id))
args = np.load('fit_args.npy', allow_pickle=True)[0]  # allow_pickle needed to read the saved args dict
args['verbose'] = False
for k in kwargs:
args[k] = kwargs[k]
fp = open('{0}_{1:05d}.log_par'.format(args['group_name'], id),'w')
fp.write('{0}_{1:05d}: {2}\n'.format(args['group_name'], id, time.ctime()))
fp.close()
try:
#args['zr'] = [0.7, 1.0]
#mb = multifit.MultiBeam('j100025+021651_{0:05d}.beams.fits'.format(id))
out = run_all(id, **args)
if get_output_data:
return out
status=1
except:
status=-1
trace = traceback.format_exc(limit=2)#, file=fp)
if args['verbose']:
print(trace)
t1 = time.time()
return id, status, t1-t0
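# Hedged usage sketch (illustrative; the object IDs below are hypothetical).
# `run_all_parallel` expects a pickled `fit_args.npy` in the working directory
# and is typically mapped over a list of object IDs:
#
#     ids = [1234, 1235, 1236]
#     results = [run_all_parallel(i) for i in ids]
#     for id_i, status, dt in results:
#         print(id_i, 'ok' if status > 0 else 'failed', '{0:.1f} s'.format(dt))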
def run_all(id, t0=None, t1=None, fwhm=1200, zr=[0.65, 1.6], dz=[0.004, 0.0002], fitter='nnls', group_name='grism', fit_stacks=True, only_stacks=False, prior=None, fcontam=0.2, pline=PLINE, mask_sn_limit=3, fit_only_beams=False, fit_beams=True, root='*', fit_trace_shift=False, phot=None, phot_obj=None, verbose=True, scale_photometry=False, show_beams=True, scale_on_stacked_1d=True, overlap_threshold=5, MW_EBV=0., sys_err=0.03, get_dict=False, bad_pa_threshold=1.6, units1d='flam', redshift_only=False, line_size=1.6, use_psf=False, get_line_width=False, sed_args={'bin':1, 'xlim':[0.3, 9]}, get_ir_psfs=True, min_mask=0.01, min_sens=0.08, **kwargs):
"""Run the full procedure
1) Load MultiBeam and stack files
2) ... tbd
fwhm=1200; zr=[0.65, 1.6]; dz=[0.004, 0.0002]; group_name='grism'; fit_stacks=True; prior=None; fcontam=0.2; mask_sn_limit=3; fit_beams=True; root=''
"""
import glob
import grizli.multifit
from grizli.stack import StackFitter
from grizli.multifit import MultiBeam
if get_dict:
frame = inspect.currentframe()
args = inspect.getargvalues(frame).locals
for k in ['id', 'get_dict', 'frame', 'glob', 'grizli', 'StackFitter', 'MultiBeam']:
if k in args:
args.pop(k)
return args
mb_files = glob.glob('{0}_{1:05d}.beams.fits'.format(root, id))
st_files = glob.glob('{0}_{1:05d}.stack.fits'.format(root, id))
if not only_stacks:
mb = MultiBeam(mb_files, fcontam=fcontam, group_name=group_name, MW_EBV=MW_EBV, sys_err=sys_err, verbose=verbose, psf=use_psf, min_mask=min_mask, min_sens=min_sens)
# Check for PAs with unflagged contamination or otherwise discrepant
# fit
out = mb.check_for_bad_PAs(chi2_threshold=bad_pa_threshold,
poly_order=1, reinit=True,
fit_background=True)
fit_log, keep_dict, has_bad = out
if has_bad:
if verbose:
print('\nHas bad PA! Final list: {0}\n{1}'.format(keep_dict,
fit_log))
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=0.5, flambda=False, kernel='point', size=32)
fig.savefig('{0}_{1:05d}.fix.stack.png'.format(group_name, id))
good_PAs = []
for k in keep_dict:
good_PAs.extend(keep_dict[k])
else:
good_PAs = None # All good
else:
good_PAs = None # All good
redshift_only=True # can't drizzle line maps from stacks
if fit_only_beams:
st = None
else:
st = StackFitter(st_files, fit_stacks=fit_stacks, group_name=group_name, fcontam=fcontam, overlap_threshold=overlap_threshold, MW_EBV=MW_EBV, verbose=verbose, sys_err=sys_err, PAs=good_PAs, chi2_threshold=bad_pa_threshold)
st.initialize_masked_arrays()
if only_stacks:
mb = st
if not only_stacks:
if fit_trace_shift:
b = mb.beams[0]
b.compute_model()
sn_lim = fit_trace_shift*1
if (np.max((b.model/b.grism['ERR'])[b.fit_mask.reshape(b.sh)]) > sn_lim) | (sn_lim > 100):
shift, _ = mb.fit_trace_shift(tol=1.e-3, verbose=verbose,
split_groups=True)
mb.initialize_masked_arrays()
## Get photometry from phot_obj
if (phot is None) & (phot_obj is not None):
phot_i, ii, dd = phot_obj.get_phot_dict(mb.ra, mb.dec)
if dd < 0.5*u.arcsec:
phot = phot_i
if phot is not None:
if phot == 'vizier':
### Get photometry from Vizier catalogs
vizier_catalog = list(utils.VIZIER_BANDS.keys())
phot = utils.get_Vizier_photometry(mb.ra, mb.dec, verbose=verbose,
vizier_catalog=vizier_catalog)
if phot is not None:
zgrid = utils.log_zgrid(zr=zr, dz=0.005)
phot['tempfilt'] = utils.generate_tempfilt(t0,
phot['filters'],
zgrid=zgrid,
MW_EBV=MW_EBV)
if phot is not None:
if st is not None:
st.set_photometry(**phot, min_err=sys_err)
mb.set_photometry(**phot, min_err=sys_err)
if t0 is None:
t0 = utils.load_templates(line_complexes=True, fsps_templates=True, fwhm=fwhm)
if t1 is None:
t1 = utils.load_templates(line_complexes=False, fsps_templates=True, fwhm=fwhm)
# Fit on stacked spectra or individual beams
if fit_only_beams:
fit_obj = mb
else:
fit_obj = st
### Do scaling now with direct spectrum function
if (scale_photometry > 0) & (phot is not None):
try:
scl = mb.scale_to_photometry(z=0, method='lm', templates=t0, order=scale_photometry*1-1)
except:
scl = [10.]
if hasattr(scl,'status'):
if scl.status > 0:
print('scale_to_photometry: [{0}]'.format(', '.join(['{0:.2f}'.format(x_i) for x_i in scl.x])))
mb.pscale = scl.x
if st is not None:
st.pscale = scl.x
# First pass
fit = fit_obj.xfit_redshift(templates=t0, zr=zr, dz=dz, prior=prior, fitter=fitter, verbose=verbose)
fit_hdu = pyfits.table_to_hdu(fit)
fit_hdu.header['EXTNAME'] = 'ZFIT_STACK'
if hasattr(fit_obj, 'pscale'):
fit_hdu.header['PSCALEN'] = (len(fit_obj.pscale)-1, 'PSCALE order')
for i, p in enumerate(fit_obj.pscale):
fit_hdu.header['PSCALE{0}'.format(i)] = (p, 'PSCALE parameter {0}'.format(i))
# Add photometry information
if (fit_obj.Nphot > 0) & hasattr(fit_obj, 'photom_filters'):
h = fit_hdu.header
h['NPHOT'] = fit_obj.Nphot, 'Number of photometry filters'
h['PHOTSRC'] = fit_obj.photom_source, 'Source of the photometry'
for i in range(len(fit_obj.photom_filters)):
h['PHOTN{0:03d}'.format(i)] = fit_obj.photom_filters[i].name.split()[0], 'Filter {0} name'.format(i)
h['PHOTL{0:03d}'.format(i)] = fit_obj.photom_pivot[i], 'Filter {0} pivot wavelength'.format(i)
h['PHOTF{0:03d}'.format(i)] = fit_obj.photom_flam[i], 'Filter {0} flux flam'.format(i)
h['PHOTE{0:03d}'.format(i)] = fit_obj.photom_eflam[i], 'Filter {0} err flam'.format(i)
# # Second pass if rescaling spectrum to photometry
# if scale_photometry:
# scl = mb.scale_to_photometry(z=fit.meta['z_map'][0], method='lm', templates=t0, order=scale_photometry*1-1)
# if scl.status > 0:
# mb.pscale = scl.x
# if st is not None:
# st.pscale = scl.x
#
# fit = fit_obj.xfit_redshift(templates=t0, zr=zr, dz=dz, prior=prior, fitter=fitter, verbose=verbose)
# fit_hdu = pyfits.table_to_hdu(fit)
# fit_hdu.header['EXTNAME'] = 'ZFIT_STACK'
# Zoom-in fit with individual beams
if fit_beams:
#z0 = fit.meta['Z50'][0]
z0 = fit.meta['z_map'][0]
#width = np.maximum(3*fit.meta['ZWIDTH1'][0], 3*0.001*(1+z0))
width = 20*0.001*(1+z0)
mb_zr = z0 + width*np.array([-1,1])
mb_fit = mb.xfit_redshift(templates=t0, zr=mb_zr, dz=[0.001, 0.0002], prior=prior, fitter=fitter, verbose=verbose)
mb_fit_hdu = pyfits.table_to_hdu(mb_fit)
mb_fit_hdu.header['EXTNAME'] = 'ZFIT_BEAM'
else:
mb_fit = fit
#### Get best-fit template
tfit = mb.template_at_z(z=mb_fit.meta['z_map'][0], templates=t1, fit_background=True, fitter=fitter)
# Redrizzle? ... testing
if False:
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=fcontam,
flambda=False,
size=48, scale=1.,
kernel='point', pixfrac=0.1,
zfit=tfit)
# Fit covariance
cov_hdu = pyfits.ImageHDU(data=tfit['covar'], name='COVAR')
Next = mb_fit.meta['N']
cov_hdu.header['N'] = Next
# Line EWs & fluxes
coeffs_clip = tfit['coeffs'][mb.N:]
covar_clip = tfit['covar'][mb.N:,mb.N:]
lineEW = utils.compute_equivalent_widths(t1, coeffs_clip, covar_clip, max_R=5000, Ndraw=1000, z=tfit['z'])
for ik, key in enumerate(lineEW):
for j in range(3):
if not np.isfinite(lineEW[key][j]):
lineEW[key][j] = -1.e30
cov_hdu.header['FLUX_{0:03d}'.format(ik)] = tfit['cfit'][key][0], '{0} line flux; erg / (s cm2)'.format(key.strip('line '))
cov_hdu.header['ERR_{0:03d}'.format(ik)] = tfit['cfit'][key][1], '{0} line uncertainty; erg / (s cm2)'.format(key.strip('line '))
cov_hdu.header['EW16_{0:03d}'.format(ik)] = lineEW[key][0], 'Rest-frame {0} EW, 16th percentile; Angstrom'.format(key.strip('line '))
cov_hdu.header['EW50_{0:03d}'.format(ik)] = lineEW[key][1], 'Rest-frame {0} EW, 50th percentile; Angstrom'.format(key.strip('line '))
cov_hdu.header['EW84_{0:03d}'.format(ik)] = lineEW[key][2], 'Rest-frame {0} EW, 84th percentile; Angstrom'.format(key.strip('line '))
cov_hdu.header['EWHW_{0:03d}'.format(ik)] = (lineEW[key][2]-lineEW[key][0])/2, 'Rest-frame {0} EW, 1-sigma half-width; Angstrom'.format(key.strip('line '))
# Velocity width
if get_line_width:
if phot is not None:
mb.unset_photometry()
vel_width_res = mb.fit_line_width(z0=tfit['z'], bl=1.2, nl=1.2)
if verbose:
print('Velocity width: BL/NL = {0:.0f}/{1:.0f}, z={2:.4f}'.format(vel_width_res[0]*1000, vel_width_res[1]*1000, vel_width_res[2]))
fit_hdu.header['VEL_BL'] = vel_width_res[0]*1000, 'Broad line FWHM'
fit_hdu.header['VEL_NL'] = vel_width_res[1]*1000, 'Narrow line FWHM'
fit_hdu.header['VEL_Z'] = vel_width_res[2], 'Line width, best redshift'
fit_hdu.header['VEL_NFEV'] = vel_width_res[3], 'Line width, NFEV'
fit_hdu.header['VEL_FLAG'] = vel_width_res[4], 'Line width, flag'
if phot is not None:
mb.set_photometry(**phot)
# Best-fit template itself
tfit_sp = utils.GTable()
for ik, key in enumerate(tfit['cfit']):
for save in [tfit_sp.meta]:
save['CVAL{0:03d}'.format(ik)] = tfit['cfit'][key][0], 'Coefficient for {0}'.format(key)
save['CERR{0:03d}'.format(ik)] = tfit['cfit'][key][1], 'Uncertainty for {0}'.format(key)
save['CNAME{0:03d}'.format(ik)] = key, 'Template name'
tfit_sp['wave'] = tfit['cont1d'].wave
tfit_sp['continuum'] = tfit['cont1d'].flux
tfit_sp['full'] = tfit['line1d'].flux
tfit_sp['wave'].unit = tfit['cont1d'].waveunits
tfit_sp['continuum'].unit = tfit['cont1d'].fluxunits
tfit_sp['full'].unit = tfit['line1d'].fluxunits
tfit_hdu = pyfits.table_to_hdu(tfit_sp)
tfit_hdu.header['EXTNAME'] = 'TEMPL'
# Make the plot
fig = mb.xmake_fit_plot(mb_fit, tfit, show_beams=show_beams, scale_on_stacked_1d=scale_on_stacked_1d)
# Add prior
if prior is not None:
fig.axes[0].plot(prior[0], np.log10(prior[1]), color='#1f77b4', alpha=0.5)
# Add stack fit to the existing plot
fig.axes[0].plot(fit['zgrid'], np.log10(fit['pdf']), color='0.5', alpha=0.5)
fig.axes[0].set_xlim(fit['zgrid'].min(), fit['zgrid'].max())
if phot is not None:
fig.axes[1].errorbar(mb.photom_pivot/1.e4, mb.photom_flam/1.e-19, mb.photom_eflam/1.e-19, marker='s', alpha=0.5, color='k', linestyle='None')
#fig.axes[1].plot(tfit['line1d'].wave/1.e4, tfit['line1d'].flux/1.e-19, color='k', alpha=0.2, zorder=100)
# Save the figure
fig.savefig('{0}_{1:05d}.full.png'.format(group_name, id))
if redshift_only:
return mb, st, fit, tfit, None
# Make the line maps
if pline is None:
pzfit, pspec2, pline = grizli.multifit.get_redshift_fit_defaults()
line_hdu = mb.drizzle_fit_lines(tfit, pline, force_line=utils.DEFAULT_LINE_LIST, save_fits=False, mask_lines=True, mask_sn_limit=mask_sn_limit, verbose=verbose, get_ir_psfs=get_ir_psfs)
# Add beam exposure times
exptime = mb.compute_exptime()
for k in exptime:
line_hdu[0].header['T_{0}'.format(k)] = (exptime[k], 'Total exposure time [s]')
line_hdu.insert(1, fit_hdu)
line_hdu.insert(2, cov_hdu)
if fit_beams:
line_hdu.insert(2, mb_fit_hdu)
line_hdu.insert(3, tfit_hdu)
line_hdu.writeto('{0}_{1:05d}.full.fits'.format(group_name, id), overwrite=True, output_verify='fix')
# 1D spectrum
oned_hdul = mb.oned_spectrum_to_hdu(tfit=tfit, bin=1, outputfile='{0}_{1:05d}.1D.fits'.format(group_name, id))#, units=units1d)
######
# Show the drizzled lines and direct image cutout, which are
# extensions `DSCI`, `LINE`, etc.
s, si = 1, line_size
s = 4.e-19/np.max([beam.beam.total_flux for beam in mb.beams])
s = np.clip(s, 0.25, 4)
full_line_list = ['Lya', 'OII', 'Hb', 'OIII', 'Ha', 'SII', 'SIII']
fig = show_drizzled_lines(line_hdu, size_arcsec=si, cmap='plasma_r', scale=s, dscale=s, full_line_list=full_line_list)
fig.savefig('{0}_{1:05d}.line.png'.format(group_name, id))
if phot is not None:
out = mb, st, fit, tfit, line_hdu
if 'pz' in phot:
full_sed_plot(mb, tfit, zfit=fit, photometry_pz=phot['pz'], **sed_args)
else:
full_sed_plot(mb, tfit, zfit=fit, **sed_args)
return mb, st, fit, tfit, line_hdu
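# Hedged usage sketch (illustrative; the root / group_name / ID are hypothetical
# and the corresponding `*.beams.fits` products are assumed to exist):
#
#     mb, st, fit, tfit, line_hdu = run_all(1234, root='j123456+123456',
#                                           group_name='j123456+123456',
#                                           zr=[0.2, 3.0], fit_beams=True,
#                                           verbose=True)
#     print('z_MAP = {0:.4f}'.format(fit.meta['z_map'][0]))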
###################################
def full_sed_plot(mb, tfit, zfit=None, bin=1, minor=0.1, save='png', sed_resolution=180, photometry_pz=None, zspec=None, spectrum_steps=False, xlim=[0.3, 9], **kwargs):
"""
Make a separate plot showing photometry and the spectrum
"""
#import seaborn as sns
import prospect.utils.smoothing
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import matplotlib.gridspec as gridspec
#mpl_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
mpl_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# sns_colors = colors = sns.color_palette("cubehelix", 8)
### seaborn cubehelix colors
sns_colors = colors = [(0.1036, 0.094, 0.206),
(0.0825, 0.272, 0.307),
(0.1700, 0.436, 0.223),
(0.4587, 0.480, 0.199),
(0.7576, 0.476, 0.437),
(0.8299, 0.563, 0.776),
(0.7638, 0.757, 0.949),
(0.8106, 0.921, 0.937)]
# Best-fit
#mb = out[0]
#zfit = out[2]
#tfit = out[3]
t1 = tfit['templates']
best_model = mb.get_flat_model([tfit['line1d'].wave, tfit['line1d'].flux])
flat_model = mb.get_flat_model([tfit['line1d'].wave, tfit['line1d'].flux*0+1])
bg = mb.get_flat_background(tfit['coeffs'])
sp = mb.optimal_extract(mb.scif[mb.fit_mask][:-mb.Nphot] - bg, bin=bin)#['G141']
spm = mb.optimal_extract(best_model, bin=bin)#['G141']
spf = mb.optimal_extract(flat_model, bin=bin)#['G141']
# Photometry
A_phot = mb._interpolate_photometry(z=tfit['z'], templates=t1)
A_model = A_phot.T.dot(tfit['coeffs'])
photom_mask = mb.photom_eflam > -98
##########
# Figure
if True:
if zfit is not None:
fig = plt.figure(figsize=[11, 9./3])
gs = gridspec.GridSpec(1,3, width_ratios=[1,1.5,1])
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
else:
fig = plt.figure(figsize=[9, 9./3])
gs = gridspec.GridSpec(1,2, width_ratios=[1,1.5])
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
else:
gs = None
fig = plt.figure(figsize=[9, 9./3])
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
# Photometry SED
ax1.errorbar(np.log10(mb.photom_pivot[photom_mask]/1.e4), mb.photom_flam[photom_mask]/1.e-19, mb.photom_eflam[photom_mask]/1.e-19, color='k', alpha=0.6, marker='s', linestyle='None', zorder=30)
sm = prospect.utils.smoothing.smoothspec(tfit['line1d'].wave, tfit['line1d'].flux, resolution=sed_resolution, smoothtype='R') #nsigma=10, inres=10)
ax1.scatter(np.log10(mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color='w', marker='s', s=80, zorder=10)
ax1.scatter(np.log10(mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color=sns_colors[4], marker='s', s=20, zorder=11)
yl1 = ax1.get_ylim()
ax1.plot(np.log10(tfit['line1d'].wave/1.e4), sm/1.e-19, color=sns_colors[4], linewidth=1, zorder=0)
#ax1.grid()
ax1.set_xlabel(r'$\lambda$ / $\mu$m')
ax2.set_xlabel(r'$\lambda$ / $\mu$m')
# Spectrum
ymax, ymin = -1e30, 1e30
for g in sp:
sn = sp[g]['flux']/sp[g]['err']
clip = sn > 3
clip = spf[g]['flux'] > 0.2*spf[g]['flux'].max()
try:
scale = mb.compute_scale_array(mb.pscale, sp[g]['wave'])
except:
scale = 1
ax2.errorbar(sp[g]['wave'][clip]/1.e4, (sp[g]['flux']/spf[g]['flux']/scale)[clip]/1.e-19, (sp[g]['err']/spf[g]['flux']/scale)[clip]/1.e-19, marker='.', color='k', alpha=0.5, linestyle='None', elinewidth=0.5, zorder=11)
if spectrum_steps:
ax2.plot(sp[g]['wave']/1.e4, spm[g]['flux']/spf[g]['flux']/1.e-19, color=sns_colors[4], linewidth=2, alpha=0.8, zorder=10, drawstyle='steps-mid')
else:
ax2.plot(sp[g]['wave']/1.e4, spm[g]['flux']/spf[g]['flux']/1.e-19, color=sns_colors[4], linewidth=2, alpha=0.8, zorder=10, marker='.')
ymax = np.maximum(ymax, (spm[g]['flux']/spf[g]['flux']/1.e-19)[clip].max())
ymin = np.minimum(ymin, (spm[g]['flux']/spf[g]['flux']/1.e-19)[clip].min())
ax1.errorbar(np.log10(sp[g]['wave'][clip]/1.e4), (sp[g]['flux']/spf[g]['flux']/scale)[clip]/1.e-19, (sp[g]['err']/spf[g]['flux']/scale)[clip]/1.e-19, marker='.', color='k', alpha=0.2, linestyle='None', elinewidth=0.5, zorder=-100)
xl, yl = ax2.get_xlim(), ax2.get_ylim()
yl = (ymin-0.3*ymax, 1.3*ymax)
# SED x range
if xlim is None:
okphot = (mb.photom_eflam > 0)
xlim = [np.minimum(xl[0]*0.7, 0.7*mb.photom_pivot[okphot].min()/1.e4), np.maximum(xl[1]/0.7, mb.photom_pivot[okphot].max()/1.e4/0.7)]
ax1.set_xlim(np.log10(xlim[0]), np.log10(xlim[1]))
ticks = np.array([0.5, 1, 2, 4, 8])
ticks = ticks[(ticks >= xlim[0]) & (ticks <= xlim[1])]
ax1.set_xticks(np.log10(ticks))
ax1.set_xticklabels(ticks)
# Back to spectrum
ax2.scatter((mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color='w', marker='s', s=80, zorder=11)
ax2.scatter((mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color=sns_colors[4], marker='s', s=20, zorder=12)
ax2.errorbar(mb.photom_pivot[photom_mask]/1.e4, mb.photom_flam[photom_mask]/1.e-19, mb.photom_eflam[photom_mask]/1.e-19, color='k', alpha=0.6, marker='s', linestyle='None', zorder=20)
ax2.set_xlim(xl); ax2.set_ylim(yl)
ax2.set_yticklabels([])
#ax2.set_xticks(np.arange(1.1, 1.8, 0.1))
#ax2.set_xticklabels([1.1, '', 1.3, '', 1.5, '', 1.7])
ax2.xaxis.set_minor_locator(MultipleLocator(minor))
ax2.xaxis.set_major_locator(MultipleLocator(minor*2))
# Show spectrum range on SED panel
xb, yb = np.array([0, 1, 1, 0, 0]), np.array([0, 0, 1, 1, 0])
ax1.plot(np.log10(xl[0]+xb*(xl[1]-xl[0])), yl[0]+yb*(yl[1]-yl[0]), linestyle=':', color='k', alpha=0.4)
ymax = np.maximum(yl1[1], yl[1]+0.02*(yl[1]-yl[0]))
ax1.set_ylim(-0.1*ymax, ymax)
tick_diff = np.diff(ax1.get_yticks())[0]
ax2.yaxis.set_major_locator(MultipleLocator(tick_diff))
#ax2.set_yticklabels([])
##########
# P(z)
if zfit is not None:
if photometry_pz is not None:
ax3.plot(photometry_pz[0], np.log10(photometry_pz[1]), color=mpl_colors[0])
ax3.plot(zfit['zgrid'], np.log10(zfit['pdf']), color=sns_colors[0])
ax3.fill_between(zfit['zgrid'], np.log10(zfit['pdf']), np.log10(zfit['pdf'])*0-100, color=sns_colors[0], alpha=0.3)
ax3.set_xlim(zfit['zgrid'].min(), zfit['zgrid'].max())
ax3.set_ylim(-3, 2.9) #np.log10(zfit['pdf']).max())
ax3.set_ylabel(r'log $p(z)$')
ax3.set_xlabel(r'$z$')
ax1.set_ylabel(r'$f_\lambda$ / $10^{-19}$')
axt = ax2
axt.text(0.95, 0.95, r'$z_\mathrm{grism}$='+'{0:.3f}'.format(tfit['z']), ha='right', va='top', transform=axt.transAxes, color=sns_colors[0], size=10)#, backgroundcolor='w')
if zspec is not None:
axt.text(0.95, 0.89, r'$z_\mathrm{spec}$='+'{0:.3f}'.format(zspec), ha='right', va='top', transform=axt.transAxes, color='r', size=10)
if zfit is not None:
ax3.scatter(zspec, 2.7, color='r', marker='v', zorder=100)
axt.text(0.05, 0.95, '{0}: {1:>6d}'.format(mb.group_name, mb.id), ha='left', va='top', transform=axt.transAxes, color='k', size=10)#, backgroundcolor='w')
#axt.text(0.05, 0.89, '{0:>6d}'.format(mb.id), ha='left', va='top', transform=axt.transAxes, color='k', size=10)#, backgroundcolor='w')
if gs is not None:
    gs.tight_layout(fig, pad=0.1)
else:
    if zfit is not None:
        fig.tight_layout(pad=0.1)
    else:
        fig.tight_layout(pad=0.5)
if save:
fig.savefig('{0}_{1:05d}.sed.{2}'.format(mb.group_name, mb.id, save))
return fig
def make_summary_catalog(target='pg0117+213', sextractor='pg0117+213-f140w.cat', verbose=True, filter_bandpasses=[]):
import glob
import os
from collections import OrderedDict
import matplotlib.pyplot as plt
import astropy.units as u
import astropy.io.fits as pyfits
import numpy as np
import grizli
from grizli import utils
keys = OrderedDict()
keys['PRIMARY'] = ['ID','RA','DEC','NINPUT','REDSHIFT','T_G102', 'T_G141', 'T_G800L', 'NUMLINES','HASLINES']
keys['ZFIT_STACK'] = ['CHI2POLY','CHI2SPL','SPLF01','SPLE01','SPLF02','SPLE02','SPLF03','SPLE03','SPLF04','SPLE04', 'DOF','CHIMIN','CHIMAX','BIC_POLY','BIC_SPL','BIC_TEMP','Z02', 'Z16', 'Z50', 'Z84', 'Z97', 'ZWIDTH1', 'ZWIDTH2', 'Z_MAP', 'Z_RISK', 'MIN_RISK', 'VEL_BL','VEL_NL','VEL_Z','VEL_NFEV','VEL_FLAG']
keys['ZFIT_BEAM'] = ['CHI2POLY','CHI2SPL','SPLF01','SPLE01','SPLF02','SPLE02','SPLF03','SPLE03','SPLF04','SPLE04', 'DOF','CHIMIN','CHIMAX','BIC_POLY','BIC_SPL','BIC_TEMP','Z02', 'Z16', 'Z50', 'Z84', 'Z97', 'ZWIDTH1', 'ZWIDTH2', 'Z_MAP', 'Z_RISK', 'MIN_RISK', 'VEL_BL','VEL_NL','VEL_Z','VEL_NFEV','VEL_FLAG']
keys['COVAR'] = ' '.join(['FLUX_{0:03d} ERR_{0:03d} EW50_{0:03d} EWHW_{0:03d}'.format(i) for i in range(24)]).split()
lines = []
pdf_max = []
files=glob.glob('{0}*full.fits'.format(target))
files.sort()
template_mags = []
sps_params = []
for file in files:
print(utils.NO_NEWLINE+file)
line = []
full = pyfits.open(file)
if 'DSCI' not in full:
continue
tab = utils.GTable.read(full['ZFIT_STACK'])
pdf_max.append(tab['pdf'].max())
for ext in keys:
if ext not in full:
for k in keys[ext]:
line.append(np.nan)
continue
h = full[ext].header
for k in keys[ext]:
if k in h:
line.append(h[k])
else:
line.append(np.nan)
# SPS
try:
sps = compute_sps_params(full)
except:
sps = {'Lv':-1*u.solLum, 'MLv':-1*u.solMass/u.solLum, 'MLv_rms':-1*u.solMass/u.solLum, 'SFRv':-1*u.solMass/u.year, 'SFRv_rms':-1*u.solMass/u.year, 'templ':-1}
sps_params.append(sps)
lines.append(line)
# Integrate best-fit template through filter bandpasses
if filter_bandpasses:
tfit = utils.GTable.gread(full['TEMPL'])
sp = utils.SpectrumTemplate(wave=tfit['wave'], flux=tfit['full'])
mags = [sp.integrate_filter(bp, abmag=True)
for bp in filter_bandpasses]
template_mags.append(mags)
columns = []
for ext in keys:
if ext == 'ZFIT_BEAM':
columns.extend(['beam_{0}'.format(k) for k in keys[ext]])
else:
columns.extend(keys[ext])
info = utils.GTable(rows=lines, names=columns)
info['PDF_MAX'] = pdf_max
root_col = utils.GTable.Column(name='root', data=[target]*len(info))
info.add_column(root_col, index=0)
for k in ['Lv','MLv','MLv_rms','SFRv','SFRv_rms']:
datak = [sps[k].value for sps in sps_params]
info[k] = datak
info[k].unit = sps[k].unit
info['sSFR'] = info['SFRv']/info['MLv']
info['stellar_mass'] = info['Lv']*info['MLv']
info['Lv'].format = '.1e'
info['MLv'].format = '.2f'
info['MLv_rms'].format = '.2f'
info['SFRv'].format = '.1f'
info['SFRv_rms'].format = '.1f'
info['sSFR'].format = '.1e'
info['stellar_mass'].format = '.1e'
if filter_bandpasses:
arr = np.array(template_mags)
for i, bp in enumerate(filter_bandpasses):
info['mag_{0}'.format(bp.name)] = arr[:,i]
info['mag_{0}'.format(bp.name)].format = '.3f'
for c in info.colnames:
info.rename_column(c, c.lower())
# Emission line names
files=glob.glob('{0}*full.fits'.format(target))
im = pyfits.open(files[0])
h = im['COVAR'].header
for i in range(24):
key = 'FLUX_{0:03d}'.format(i)
if key not in h:
continue
line = h.comments[key].split()[0]
for root in ['flux','err','ew50','ewhw']:
col = '{0}_{1}'.format(root, line)
info.rename_column('{0}_{1:03d}'.format(root, i), col)
if root.startswith('ew'):
info[col].format = '.1f'
else:
info[col].format = '.1f'
info['sn_{0}'.format(line)] = info['flux_'+line]/info['err_'+line]
info['sn_{0}'.format(line)][info['err_'+line] == 0] = -99
#info['sn_{0}'.format(line)].format = '.1f'
info['chinu'] = info['chimin']/info['dof']
info['chinu'].format = '.2f'
info['bic_diff'] = info['bic_poly'] - info['bic_temp']
info['bic_diff'].format = '.1f'
info['log_risk'] = np.log10(info['min_risk'])
info['log_risk'].format = '.2f'
info['log_pdf_max'] = np.log10(info['pdf_max'])
info['log_pdf_max'].format = '.2f'
info['zq'] = info['log_risk'] - info['log_pdf_max']
info['zq'].format = '.2f'
info['beam_chinu'] = info['beam_chimin']/info['beam_dof']
info['beam_chinu'].format = '.2f'
info['beam_bic_diff'] = info['beam_bic_poly'] - info['beam_bic_temp']
info['beam_bic_diff'].format = '.1f'
info['beam_log_risk'] = np.log10(info['beam_min_risk'])
info['beam_log_risk'].format = '.2f'
# ID with link to CDS
idx = ['<a href="http://vizier.u-strasbg.fr/viz-bin/VizieR?-c={0:.6f}+{1:.6f}&-c.rs=2">{2}</a>'.format(info['ra'][i], info['dec'][i], info['id'][i]) for i in range(len(info))]
info['idx'] = idx
### PNG columns
for ext in ['stack','full','line']:
png = ['{0}_{1:05d}.{2}.png'.format(target, id, ext) for id in info['id']]
info['png_{0}'.format(ext)] = ['<a href={0}><img src={0} height=200></a>'.format(p) for p in png]
### Column formats
for col in info.colnames:
if col.split('beam_')[-1].startswith('z'):  # optional 'beam_' prefix; str.strip removes characters, not a prefix
info[col].format = '.4f'
if col in ['ra','dec']:
info[col].format = '.6f'
### Sextractor catalog
if sextractor is None:
info.write('{0}.info.fits'.format(target), overwrite=True)
return info
#sextractor = glob.glob('{0}-f*cat'.format(target))[0]
try:
hcat = grizli.utils.GTable.gread(sextractor) #, format='ascii.sextractor')
except:
hcat = grizli.utils.GTable.gread(sextractor, sextractor=True)
for c in hcat.colnames:
hcat.rename_column(c, c.lower())
idx, dr = hcat.match_to_catalog_sky(info, self_radec=('x_world', 'y_world'), other_radec=None)
for c in hcat.colnames:
info.add_column(hcat[c][idx])
info.write('{0}.info.fits'.format(target), overwrite=True)
return info
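# Hedged usage sketch (illustrative; the target name is hypothetical):
#
#     info = make_summary_catalog(target='j123456+123456', sextractor=None)
#     sel = (info['bic_diff'] > 30) & (info['chinu'] < 2)
#     print('robust redshifts:', sel.sum())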
def compute_sps_params(full='j021820-051015_01276.full.fits', cosmology=Planck15):
import numpy as np
from astropy.io import fits as pyfits
from astropy.table import Table
import astropy.units as u
from grizli import utils
import pysynphot as S
if isinstance(full, str):
im = pyfits.open(full)
else:
im = full
h = im['TEMPL'].header
templ = Table(im['TEMPL'].data)
z = im['ZFIT_STACK'].header['Z_MAP']
# Get coefffs
coeffs, keys, ix = [], [], []
count=0
for k in h:
if k.startswith('CNAME'):
if h[k].startswith('fsps'):
ix.append(count)
keys.append(h[k])
coeffs.append(h[k.replace('CNAME','CVAL')])
count += 1
cov = im['COVAR'].data[np.array(ix),:][:,np.array(ix)]
covd = cov.diagonal()
# Normalize to V band, fsps_QSF_12_v3
normV = np.array([3.75473763e-15, 2.73797790e-15, 1.89469588e-15,
1.32683449e-15, 9.16760812e-16, 2.43922395e-16, 4.76835746e-15,
3.55616962e-15, 2.43745972e-15, 1.61394625e-15, 1.05358710e-15,
5.23733297e-16])
coeffsV = np.array(coeffs)*normV
rmsV = np.sqrt(covd)*normV
rms_norm = rmsV/coeffsV.sum()
coeffs_norm = coeffsV/coeffsV.sum()
param_file = os.path.join(os.path.dirname(__file__), 'data/templates/fsps/fsps_QSF_12_v3.param.fits')
tab_temp = Table.read(param_file)
temp_MLv = tab_temp['mass']/tab_temp['Lv']
temp_SFRv = tab_temp['sfr']
mass_norm = (coeffs_norm*tab_temp['mass']).sum()*u.solMass
Lv_norm = (coeffs_norm*tab_temp['Lv']).sum()*u.solLum
MLv = mass_norm / Lv_norm
SFR_norm = (coeffs_norm*tab_temp['sfr']).sum()*u.solMass/u.yr
SFRv = SFR_norm / Lv_norm
mass_var = ((rms_norm*tab_temp['mass'])**2).sum()
Lv_var = ((rms_norm*tab_temp['Lv'])**2).sum()
SFR_var = ((rms_norm*tab_temp['sfr'])**2).sum()
MLv_var = MLv**2 * (mass_var/mass_norm.value**2 + Lv_var/Lv_norm.value**2)
MLv_rms = np.sqrt(MLv_var)
SFRv_var = SFRv**2 * (SFR_var/SFR_norm.value**2 + Lv_var/Lv_norm.value**2)
SFRv_rms = np.sqrt(SFRv_var)
vband = S.ObsBandpass('v')
vbandz = S.ArrayBandpass(vband.wave*(1+z), vband.throughput)
best_templ = utils.SpectrumTemplate(templ['wave'], templ['full'])
fnu = best_templ.integrate_filter(vbandz)*(u.erg/u.s/u.cm**2/u.Hz)
dL = cosmology.luminosity_distance(z).to(u.cm)
Lnu = fnu*4*np.pi*dL**2
pivotV = vbandz.pivot()*u.Angstrom
nuV = (const.c/pivotV).to(u.Hz)
Lv = (nuV*Lnu).to(u.L_sun)
mass = MLv*Lv
SFR = SFRv*Lv
sps = {'Lv':Lv, 'MLv':MLv, 'MLv_rms':MLv_rms, 'SFRv':SFRv, 'SFRv_rms':SFRv_rms, 'templ':best_templ}
return sps
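# Hedged usage sketch (illustrative; the filename is hypothetical and pysynphot
# plus the bundled fsps template parameters are assumed to be available):
#
#     sps = compute_sps_params('j123456+123456_01234.full.fits')
#     print(sps['Lv'], sps['MLv'], sps['SFRv'])  # L_V, M/L_V, SFR/L_V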
def _loss(dz, gamma=0.15):
    """Risk / Loss function, Tanaka et al. (https://arxiv.org/abs/1704.05988)

    Parameters
    ----------
    dz : float or `~numpy.ndarray`
        Redshift difference(s), typically (z_grid - z)/(1 + z).

    gamma : float
        Width parameter of the loss kernel.

    Returns
    -------
    loss : float
        Loss value: 0 at dz = 0, approaching 1 for |dz| >> gamma.
    """
    return 1-1/(1+(dz/gamma)**2)
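# Hedged, runnable sketch (illustrative only): the loss is 0 at dz = 0, reaches
# 0.5 at |dz| = gamma, and approaches 1 for |dz| >> gamma.
def _example_loss_values():
    assert _loss(0.0) == 0.0
    assert np.isclose(_loss(0.15, gamma=0.15), 0.5)
    assert _loss(100.0) > 0.999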
def refit_beams(root='j012017+213343', append='x', id=708, keep_dict={'G141':[201, 291]}, poly_order=3, make_products=True, run_fit=True, **kwargs):
"""
Regenerate a MultiBeam object selecting only certiain PAs
Parameters
----------
root : str
Root of the "beams.fits" file to load.
append : str
String to append to the rootname of the updated products.
id : int
Object ID. The input filename is built like
>>> beams_file = '{0}_{1:05d}.beams.fits'.format(root, id)
keep_dict : dict
Dictionary of the PAs/grisms to keep. (See the
`~grizli.multifit.MultiBeam.PA` attribute.)
poly_order : int
Order of the polynomial to fit.
make_products : bool
Make stacked spectra and diagnostic figures.
run_fit : bool
Run the redshift fit on the new products
kwargs : dict
Optional keywords passed to `~grizli.fitting.run_all_parallel`.
Returns
-------
mb : `~grizli.multifit.MultiBeam`
New beam object.
"""
import numpy as np
try:
    from grizli import utils, fitting
    from grizli.multifit import MultiBeam
except:
    from . import utils, fitting
    from .multifit import MultiBeam
mb = MultiBeam('{0}_{1:05d}.beams.fits'.format(root, id), group_name=root)
keep_beams = []
for g in keep_dict:
if g not in mb.PA:
continue
for pa in keep_dict[g]:
if float(pa) in mb.PA[g]:
keep_beams.extend([mb.beams[i] for i in mb.PA[g][float(pa)]])
mb = MultiBeam(keep_beams, group_name=root+append)
mb.write_master_fits()
if not make_products:
return mb
wave = np.linspace(2000,2.5e4,100)
poly_templates = utils.polynomial_templates(wave, order=poly_order)
pfit = mb.template_at_z(z=0, templates=poly_templates, fit_background=True, fitter='lstsq', get_uncertainties=2)
try:
fig1 = mb.oned_figure(figsize=[5,3], tfit=pfit)
fig1.savefig('{0}_{1:05d}.1D.png'.format(root+append, id))
except:
pass
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=0.5, flambda=False, kernel='point', size=32, zfit=pfit)
fig.savefig('{0}_{1:05d}.stack.png'.format(root+append, id))
if run_fit:
fitting.run_all_parallel(id, group_name=root+append, root=root+'x', verbose=True, **kwargs)
return mb
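# Hedged usage sketch (illustrative; the root, ID, and PA values mirror the
# defaults above and are hypothetical):
#
#     mb_new = refit_beams(root='j012017+213343', id=708, append='x',
#                          keep_dict={'G141': [201, 291]}, run_fit=False)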
class GroupFitter(object):
"""Combine stack.StackFitter and MultiBeam fitting into a single object
Will have to match the attributes between the different objects, which
is already close.
"""
def _test(self):
print(self.Ngrism)
def _get_slices(self, masked=False):
"""Precompute array slices for how the individual components map into the single combined arrays.
Parameters
----------
masked : bool
Return indices of masked arrays rather than simple slices of the
full beams.
Returns
-------
slices : list
List of slices.
"""
x = 0
slices = []
# use masked index arrays rather than slices
if masked:
for i in range(self.N):
beam = self.beams[i]
if beam.fit_mask.sum() == 0:
slices.append(None)
continue
idx = np.arange(beam.fit_mask.sum())+x
slices.append(idx) #[slice(x+0, x+beam.size)][beam.fit_mask])
x = idx[-1]+1
else:
for i in range(self.N):
slices.append(slice(x+0, x+self.beams[i].size))
x += self.beams[i].size
return slices
def _update_beam_mask(self):
"""
Compute versions of the masked arrays
"""
for ib, b in enumerate(self.beams):
b.fit_mask &= self.fit_mask[self.slices[ib]]
self.mslices = self._get_slices(masked=True)
self.Nmask = self.fit_mask.sum()
if hasattr(self, 'Nphot'):
self.Nspec = self.Nmask - self.Nphot
else:
self.Nspec = self.Nmask
def _init_background(self, masked=True):
"""Initialize the (flat) background model components
Parameters
----------
None :
Returns
-------
A_bg : `~np.ndarray`
"""
if masked:
A_bg = np.zeros((self.N, self.Nmask))
for i in range(self.N):
A_bg[i, self.mslices[i]] = 1.
else:
A_bg = np.zeros((self.N, self.Ntot))
for i in range(self.N):
A_bg[i, self.slices[i]] = 1.
return A_bg
def get_SDSS_photometry(self, bands='ugriz', templ=None, radius=2, SDSS_CATALOG='V/147/sdss12', get_panstarrs=False):
#from astroquery.sdss import SDSS
#from astropy import coordinates as coords
import astropy.units as u
from astroquery.vizier import Vizier
import astropy.coordinates as coord
import pysynphot as S
from eazy.templates import Template
from eazy.filters import FilterFile
from eazy.photoz import TemplateGrid
from eazy.filters import FilterDefinition
if get_panstarrs:
SDSS_CATALOG = 'II/349'
bands = 'grizy'
# pos = coords.SkyCoord(self.ra*u.deg, self.dec*u.deg, frame='icrs')
# fields = ['ra','dec','modelMag_r', 'modelMagErr_r']
# for b in bands:
# fields.extend(['modelFlux_'+b, 'modelFluxIvar_'+b])
#
# xid = SDSS.query_region(pos, photoobj_fields=fields, spectro=False, radius=radius*u.arcsec)
from astroquery.vizier import Vizier
import astropy.units as u
import astropy.coordinates as coord
coo = coord.SkyCoord(ra=self.ra, dec=self.dec, unit=(u.deg, u.deg),
frame='icrs')
v = Vizier(catalog=SDSS_CATALOG, columns=['+_r','*'])
try:
tab = v.query_region(coo, radius="{0}s".format(radius),
catalog=SDSS_CATALOG)[0]
ix = np.argmin(tab['rmag'])
tab = tab[ix]
except:
return None
filters = [FilterDefinition(bp=S.ObsBandpass('sdss,{0}'.format(b))) for b in bands]
pivot = {}
for ib, b in enumerate(bands):
pivot[b] = filters[ib].pivot()
#to_flam = 10**(-0.4*(22.5+48.6))*3.e18 # / pivot(Ang)**2
#flam = np.array([xid['modelFlux_{0}'.format(b)][0]*to_flam/pivot[b]**2 for b in bands])
#eflam = np.array([np.sqrt(1/xid['modelFluxIvar_{0}'.format(b)][0])*to_flam/pivot[b]**2 for b in bands])
to_flam = 10**(-0.4*(48.6))*3.e18 # / pivot(Ang)**2
flam = np.array([10**(-0.4*(tab[b+'mag']))*to_flam/pivot[b]**2 for ib, b in enumerate(bands)])
eflam = np.array([tab['e_{0}mag'.format(b)]*np.log(10)/2.5*flam[ib] for ib, b in enumerate(bands)])
phot = {'flam':flam, 'eflam':eflam, 'filters':filters, 'tempfilt':None}
if templ is None:
return phot
# Make fast SDSS template grid
templates = [Template(arrays=[templ[t].wave, templ[t].flux], name=t) for t in templ]
zgrid = utils.log_zgrid(zr=[0.01, 3.4], dz=0.005)
tempfilt = TemplateGrid(zgrid, templates, filters=filters, add_igm=True, galactic_ebv=0, Eb=0, n_proc=0)
#filters = [all_filters.filters[f-1] for f in [156,157,158,159,160]]
phot = {'flam':flam, 'eflam':eflam, 'filters':filters, 'tempfilt':tempfilt}
return phot
### Vizier
def set_photometry(self, flam=[], eflam=[], filters=[], lc=None, force=False, tempfilt=None, min_err=0.02, TEF=None, pz=None, source='unknown'):
"""
Add photometry
"""
if (self.Nphot > 0) & (not force):
print('Photometry already set (Nphot={0})'.format(self.Nphot))
return True
okphot = (eflam > 0) & np.isfinite(eflam) & np.isfinite(flam)
self.Nphot = okphot.sum() #len(flam)
self.Nphotbands = len(eflam)
if self.Nphot == 0:
return True
if (len(flam) != len(eflam)) | (len(flam) != len(filters)):
print('flam/eflam/filters dimensions don\'t match')
return False
self.photom_flam = flam*1
self.photom_eflam = np.sqrt(eflam**2+(min_err*flam)**2)
self.photom_flam[~okphot] = -99
self.photom_eflam[~okphot] = -99
self.photom_filters = filters
self.photom_source = source
self.sivarf = np.hstack([self.sivarf, 1/self.photom_eflam])
self.weightf = np.hstack([self.weightf, np.ones_like(self.photom_eflam)])
self.fit_mask = np.hstack([self.fit_mask, okphot])
self.fit_mask &= self.weightf > 0
#self.flat_flam = np.hstack((self.flat_flam, self.photom_eflam*0.))
# Mask for just spectra
self.fit_mask_spec = self.fit_mask & True
self.fit_mask_spec[-self.Nphotbands:] = False
self.Nmask = self.fit_mask.sum()
self.Nspec = self.Nmask - self.Nphot
self.scif = np.hstack((self.scif, flam))
self.DoF = int((self.weightf*self.fit_mask).sum())
self.is_spec = np.isfinite(self.scif)
self.is_spec[-len(flam):] = False
self.photom_pivot = np.array([filter.pivot() for filter in filters])
self.wavef = np.hstack((self.wavef, self.photom_pivot))
# eazypy tempfilt for faster interpolation
self.tempfilt = tempfilt
self.TEF = TEF
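# Hedged usage sketch (illustrative, on a `MultiBeam`-like instance `mb`):
# attach broad-band photometry so that subsequent template fits include the
# photometric constraints.  `flam` and `eflam` are assumed to be numpy arrays
# and `filters` pysynphot-like bandpasses with a `pivot()` method.
#
#     phot = {'flam': flam, 'eflam': eflam, 'filters': filters, 'tempfilt': None}
#     mb.set_photometry(**phot, min_err=0.03, source='user')
#     ...                      # run xfit_redshift / template_at_z as usual
#     mb.unset_photometry()    # restore the spectroscopy-only arrays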
def unset_photometry(self):
if self.Nphot == 0:
return True
Nbands = self.Nphotbands
self.sivarf = self.sivarf[:-Nbands]
self.weightf = self.weightf[:-Nbands]
#self.flat_flam = self.flat_flam[:-Nbands]
self.fit_mask = self.fit_mask[:-Nbands]
self.fit_mask &= self.weightf > 0
self.fit_mask_spec = self.fit_mask & True
self.scif = self.scif[:-Nbands]
self.wavef = self.wavef[:-Nbands]
self.DoF = int((self.weightf*self.fit_mask).sum())
self.is_spec = 1
self.Nphot = 0
self.Nphotbands = 0
self.Nmask = self.fit_mask.sum()
self.Nspec = self.Nmask - self.Nphot
self.tempfilt = None
def _interpolate_photometry(self, z=0., templates=[]):
"""
Interpolate templates through photometric filters
xx: TBD better handling of emission line templates and use eazpy tempfilt
object for huge speedup
"""
NTEMP = len(templates)
A_phot = np.zeros((NTEMP+self.N, len(self.photom_flam))) #self.Nphot))
mask = self.photom_eflam > 0
if (self.tempfilt is not None):
if (self.tempfilt.NTEMP == NTEMP):
#A_spl = self.tempfilt(z)
A_phot[self.N:,:] = self.tempfilt(z)
A_phot *= 3.e18/self.photom_pivot**2*(1+z)
A_phot[~np.isfinite(A_phot)] = 0
return A_phot[:,mask]
for it, key in enumerate(templates):
#print(key)
tz = templates[key].zscale(z, scalar=1)
for ifilt, filt in enumerate(self.photom_filters):
A_phot[self.N+it, ifilt] = tz.integrate_filter(filt)*3.e18/self.photom_pivot[ifilt]**2#*(1+z)
# pl = plt.plot(tz.wave, tz.flux)
# plt.scatter(self.photom_pivot, A_phot[self.N+it,:], color=pl[0].get_color())
return A_phot[:,mask]
def xfit_at_z(self, z=0, templates=[], fitter='nnls', fit_background=True, get_uncertainties=False, get_design_matrix=False, pscale=None, COEFF_SCALE=1.e-19, get_components=False, huber_delta=4, get_residuals=False, include_photometry=True):
"""Fit the 2D spectra with a set of templates at a specified redshift.
Parameters
----------
z : float
Redshift.
templates : list
List of templates to fit.
fitter : str
Minimization algorithm to compute template coefficients.
The default 'nnls' uses non-negative least squares.
The other option is standard 'leastsq'.
fit_background : bool
Fit additive pedestal background offset.
get_uncertainties : bool
Compute coefficient uncertainties from the covariance matrix
get_design_matrix : bool
Return design matrix and data, rather than nominal outputs.
huber_delta : float
Use the Huber loss function (`~scipy.special.huber`) rather than
direct chi-squared. If `huber_delta` < 0, then fall back to chi2.
Returns
-------
chi2 : float
Chi-squared of the fit
coeffs, coeffs_err : `~np.ndarray`
Template coefficients and uncertainties.
covariance : `~np.ndarray`
Full covariance
"""
import scipy.optimize
#import scipy.sparse
from scipy.special import huber
NTEMP = len(templates)
if (self.Nphot > 0) & include_photometry:
A = np.zeros((self.N+NTEMP, self.Nmask))
else:
A = np.zeros((self.N+NTEMP, self.Nspec))
if fit_background:
A[:self.N,:self.Nspec] = self.A_bgm
lower_bound = np.zeros(self.N+NTEMP)
lower_bound[:self.N] = -0.05
upper_bound = np.ones(self.N+NTEMP)*np.inf
upper_bound[:self.N] = 0.05
# A = scipy.sparse.csr_matrix((self.N+NTEMP, self.Ntot))
# bg_sp = scipy.sparse.csc_matrix(self.A_bg)
for i, t in enumerate(templates):
if t.startswith('line'):
lower_bound[self.N+i] = -np.inf
ti = templates[t]
if z > IGM_MINZ:
if IGM is None:
igmz = 1.
else:
lylim = ti.wave < 1250
igmz = np.ones_like(ti.wave)
igmz[lylim] = IGM.full_IGM(z, ti.wave[lylim]*(1+z))
else:
igmz = 1.
# Don't redshift spline templates
if ti.name.startswith('bspl'):
s = [ti.wave, ti.flux*igmz]
else:
s = [ti.wave*(1+z), ti.flux/(1+z)*igmz]
for j, beam in enumerate(self.beams):
mask_i = beam.fit_mask.reshape(beam.sh)
clip = mask_i.sum(axis=0) > 0
if clip.sum() == 0:
continue
lam_beam = beam.wave[clip]
if ((s[0].min() > lam_beam.max()) |
(s[0].max() < lam_beam.min())):
continue
sl = self.mslices[j]
if t in beam.thumbs:
#print('Use thumbnail!', t)
A[self.N+i, sl] = beam.compute_model(thumb=beam.thumbs[t], spectrum_1d=s, in_place=False, is_cgs=True)[beam.fit_mask]*COEFF_SCALE
else:
A[self.N+i, sl] = beam.compute_model(spectrum_1d=s, in_place=False, is_cgs=True)[beam.fit_mask]*COEFF_SCALE
# if j == 0:
# m = beam.compute_model(spectrum_1d=s, in_place=False, is_cgs=True)
# ds9.frame(i)
# ds9.view(m.reshape(beam.sh))
if fit_background:
if fitter in ['nnls', 'lstsq']:
pedestal = 0.04
else:
pedestal = 0.
else:
pedestal = 0
#oktemp = (A*self.fit_mask).sum(axis=1) != 0
oktemp = A.sum(axis=1) != 0
# Photometry
if (self.Nphot > 0):
if include_photometry:
A_phot = self._interpolate_photometry(z=z,
templates=templates)
A[:,-self.Nphot:] = A_phot*COEFF_SCALE #np.hstack((A, A_phot))
full_fit_mask = self.fit_mask
else:
full_fit_mask = self.fit_mask_spec
else:
full_fit_mask = self.fit_mask
# Weight design matrix and data by 1/sigma
#Ax = A[oktemp,:]*self.sivarf[full_fit_mask]
# Include `weight` variable to account for contamination
sivarf = self.sivarf*np.sqrt(self.weightf)
Ax = A[oktemp,:]*sivarf[full_fit_mask]
#AxT = Ax[:,full_fit_mask].T
# Scale photometry
if hasattr(self, 'pscale'):
if (self.pscale is not None):
scale = self.compute_scale_array(self.pscale, self.wavef[full_fit_mask])
if self.Nphot > 0:
scale[-self.Nphot:] = 1.
Ax *= scale
if fit_background:
for i in range(self.N):
Ax[i,:] /= scale
# Need transpose
AxT = Ax.T
# Masked data array, including background pedestal
data = ((self.scif+pedestal*self.is_spec)*sivarf)[full_fit_mask]
if get_design_matrix:
return AxT, data
# Run the minimization
if fitter == 'nnls':
coeffs_i, rnorm = scipy.optimize.nnls(AxT, data)
elif fitter == 'lstsq':
coeffs_i, residuals, rank, s = np.linalg.lstsq(AxT, data, rcond=None)
else:
# Bounded Least Squares
lsq_out = scipy.optimize.lsq_linear(AxT, data, bounds=(lower_bound[oktemp], upper_bound[oktemp]), method='bvls', tol=1.e-8)
coeffs_i = lsq_out.x
if False:
r = AxT.dot(coeffs_i) - data
# Compute background array
if fit_background:
background = np.dot(coeffs_i[:self.N], A[:self.N,:]) - pedestal
if self.Nphot > 0:
background[-self.Nphot:] = 0.
coeffs_i[:self.N] -= pedestal
else:
background = self.scif[full_fit_mask]*0.
# Full model
if fit_background:
model = np.dot(coeffs_i[self.N:], Ax[self.N:,:]/sivarf[full_fit_mask])
else:
model = np.dot(coeffs_i, Ax/sivarf[full_fit_mask])
# Model photometry
if self.Nphot > 0:
self.photom_model = model[-self.Nphot:]*1
# Residuals and Chi-squared
resid = self.scif[full_fit_mask] - model - background
if get_components:
return model, background
#chi2 = np.sum(resid[full_fit_mask]**2*self.sivarf[full_fit_mask]**2)
norm_resid = resid*(sivarf)[full_fit_mask]
# Use Huber loss function rather than direct chi2
if get_residuals:
chi2 = norm_resid
else:
if huber_delta > 0:
chi2 = huber(huber_delta, norm_resid)*2.
else:
chi2 = norm_resid**2
chi2 = np.sum(chi2)
# Uncertainties from covariance matrix
if get_uncertainties:
try:
# Covariance is inverse of AT.A
covar_i = np.matrix(np.dot(AxT.T, AxT)).I.A
covar = utils.fill_masked_covar(covar_i, oktemp)
covard = np.sqrt(covar.diagonal())
# Compute covariances after masking templates with coeffs = 0
if get_uncertainties == 2:
nonzero = coeffs_i != 0
if nonzero.sum() > 0:
AxTm = AxT[:,nonzero]
#mcoeffs_i, rnorm = scipy.optimize.nnls(AxTm, data)
#mcoeffs_i[:self.N] -= pedestal
mcovar_i = np.matrix(np.dot(AxTm.T, AxTm)).I.A
mcovar = utils.fill_masked_covar(mcovar_i, nonzero)
mcovar = utils.fill_masked_covar(mcovar, oktemp)
mcovard = np.sqrt(mcovar.diagonal())
covar = mcovar
covard = mcovard
except:
print('Except: covar!')
covar = np.zeros((self.N+NTEMP, self.N+NTEMP))
covard = np.zeros(self.N+NTEMP)#-1.
mcovard = covard
else:
covar = np.zeros((self.N+NTEMP, self.N+NTEMP))
covard = np.zeros(self.N+NTEMP)#-1.
coeffs = np.zeros(self.N+NTEMP)
coeffs[oktemp] = coeffs_i #[self.N:]] = coeffs[self.N:]
coeffs_err = covard #np.zeros(NTEMP)
#full_coeffs_err[oktemp[self.N:]] = covard[self.N:]
del(A); del(Ax); del(AxT)
#if fit_background:
coeffs[self.N:] *= COEFF_SCALE
coeffs_err[self.N:] *= COEFF_SCALE
#covar[self.N:,self.N:] *= COEFF_SCALE**2
covar[self.N:,:] *= COEFF_SCALE
covar[:,self.N:] *= COEFF_SCALE
return chi2, coeffs, coeffs_err, covar
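# Hedged usage sketch (illustrative, on a `MultiBeam`-like instance `mb`):
# evaluate the template fit at a single trial redshift.
#
#     templates = utils.load_templates(fwhm=1200, line_complexes=True)
#     chi2, coeffs, coeffs_err, covar = mb.xfit_at_z(z=1.25, templates=templates,
#                                                    fitter='nnls',
#                                                    fit_background=True,
#                                                    get_uncertainties=True)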
def xfit_redshift(self, prior=None, fwhm=1200,
make_figure=True, zr=[0.65, 1.6], dz=[0.005, 0.0004],
verbose=True, fit_background=True, fitter='nnls',
delta_chi2_threshold=0.004, poly_order=3, zoom=True,
line_complexes=True, templates={}, figsize=[8,5],
fsps_templates=False, get_uncertainties=True,
Rspline=30, huber_delta=4, get_student_logpdf=False):
"""TBD
"""
from numpy import polyfit, polyval  # the scipy aliases of these numpy functions have been removed
from scipy.stats import t as student_t
from scipy.special import huber
if zr == 0:
stars = True
zr = [0, 0.01]
fitter='nnls'
else:
stars = False
zgrid = utils.log_zgrid(zr, dz=dz[0])
NZ = len(zgrid)
#### Polynomial SED fit
wpoly = np.linspace(1000,5.e4,1000)
# tpoly = utils.polynomial_templates(wpoly, line=True)
# out = self.xfit_at_z(z=0., templates=tpoly, fitter='nnls',
# fit_background=True, get_uncertainties=False)
tpoly = utils.polynomial_templates(wpoly, order=poly_order,
line=False)
out = self.xfit_at_z(z=0., templates=tpoly, fitter='lstsq',
fit_background=True, get_uncertainties=False,
include_photometry=False, huber_delta=huber_delta)
chi2_poly, coeffs_poly, err_poly, cov = out
#### Spline SED fit
wspline = np.arange(4200, 2.5e4)
#Rspline = 30
df_spl = len(utils.log_zgrid(zr=[wspline[0], wspline[-1]], dz=1./Rspline))
tspline = utils.bspline_templates(wspline, df=df_spl+2, log=True, clip=0.0001)
out = self.xfit_at_z(z=0., templates=tspline, fitter='lstsq',
fit_background=True, get_uncertainties=True,
include_photometry=False, get_residuals=True)
spline_resid, coeffs_spline, err_spline, cov = out
if huber_delta > 0:
chi2_spline = (huber(huber_delta, spline_resid)*2.).sum()
else:
chi2_spline = (spline_resid**2).sum()
student_t_pars = student_t.fit(spline_resid)
#poly1d, xxx = utils.dot_templates(coeffs_poly[self.N:], tpoly, z=0)
# tpoly = utils.polynomial_templates(wpoly, order=3)
# out = self.xfit_at_z(z=0., templates=tpoly, fitter='lstsq',
# fit_background=True)
# chi2_poly, coeffs_poly, c, cov = out
# if True:
# cp, lp = utils.dot_templates(coeffs_poly[self.N:], tpoly)
### Set up for template fit
if templates == {}:
templates = utils.load_templates(fwhm=fwhm, stars=stars, line_complexes=line_complexes, fsps_templates=fsps_templates)
else:
if verbose:
print('User templates! N={0} \n'.format(len(templates)))
NTEMP = len(templates)
out = self.xfit_at_z(z=0., templates=templates, fitter=fitter,
fit_background=fit_background,
get_uncertainties=False)
chi2, coeffs, coeffs_err, covar = out
chi2 = np.zeros(NZ)
logpdf = np.zeros(NZ)
coeffs = np.zeros((NZ, coeffs.shape[0]))
covar = np.zeros((NZ, covar.shape[0], covar.shape[1]))
chi2min = 1e30
iz = 0
for i in range(NZ):
out = self.xfit_at_z(z=zgrid[i], templates=templates,
fitter=fitter, fit_background=fit_background,
get_uncertainties=get_uncertainties,
get_residuals=True)
fit_resid, coeffs[i,:], coeffs_err, covar[i,:,:] = out
if huber_delta > 0:
chi2[i] = (huber(huber_delta, fit_resid)*2.).sum()
else:
chi2[i] = (fit_resid**2).sum()
if get_student_logpdf:
logpdf[i] = student_t.logpdf(fit_resid, *student_t_pars).sum()
if chi2[i] < chi2min:
iz = i
chi2min = chi2[i]
if verbose:
print(utils.NO_NEWLINE + ' {0:.4f} {1:9.1f} ({2:.4f}) {3:d}/{4:d}'.format(zgrid[i], chi2[i], zgrid[iz], i+1, NZ))
if verbose:
print('First iteration: z_best={0:.4f}\n'.format(zgrid[iz]))
## Find peaks
import peakutils
# Make "negative" chi2 for peak-finding
#chi2_test = chi2_poly
chi2_test = chi2_spline
if chi2_test > (chi2.min()+100):
chi2_rev = (chi2.min() + 100 - chi2)/self.DoF
elif chi2_test < (chi2.min() + 9):
chi2_rev = (chi2.min() + 16 - chi2)/self.DoF
else:
chi2_rev = (chi2_test - chi2)/self.DoF
chi2_rev[chi2_rev < 0] = 0
indexes = peakutils.indexes(chi2_rev, thres=0.4, min_dist=8)
num_peaks = len(indexes)
if False:
plt.plot(zgrid, (chi2-chi2.min())/ self.DoF)
plt.scatter(zgrid[indexes], (chi2-chi2.min())[indexes]/ self.DoF, color='r')
# delta_chi2 = (chi2.max()-chi2.min())/self.DoF
# if delta_chi2 > delta_chi2_threshold:
if (num_peaks > 0) & (not stars) & zoom:
zgrid_zoom = []
for ix in indexes:
if (ix > 0) & (ix < len(chi2)-1):
c = polyfit(zgrid[ix-1:ix+2], chi2[ix-1:ix+2], 2)
zi = -c[1]/(2*c[0])
chi_i = polyval(c, zi)
zgrid_zoom.extend(np.arange(zi-2*dz[0],
zi+2*dz[0]+dz[1]/10., dz[1]))
# zgrid_zoom = utils.zoom_zgrid(zgrid, chi2/self.DoF,
# threshold=delta_chi2_threshold,
# factor=dz[0]/dz[1])
NZOOM = len(zgrid_zoom)
chi2_zoom = np.zeros(NZOOM)
logpdf_zoom = np.zeros(NZOOM)
coeffs_zoom = np.zeros((NZOOM, coeffs.shape[1]))
covar_zoom = np.zeros((NZOOM, coeffs.shape[1], covar.shape[2]))
iz = 0
chi2min = 1.e30
for i in range(NZOOM):
out = self.xfit_at_z(z=zgrid_zoom[i], templates=templates,
fitter=fitter,
fit_background=fit_background,
get_uncertainties=get_uncertainties,
get_residuals=True)
fit_resid, coeffs_zoom[i,:], e, covar_zoom[i,:,:] = out
if huber_delta > 0:
chi2_zoom[i] = (huber(huber_delta, fit_resid)*2.).sum()
else:
chi2_zoom[i] = (fit_resid**2).sum()
if get_student_logpdf:
logpdf_zoom[i] = student_t.logpdf(fit_resid,
*student_t_pars).sum()
#A, coeffs_zoom[i,:], chi2_zoom[i], model_2d = out
if chi2_zoom[i] < chi2min:
chi2min = chi2_zoom[i]
iz = i
if verbose:
print(utils.NO_NEWLINE+'- {0:.4f} {1:9.1f} ({2:.4f}) {3:d}/{4:d}'.format(zgrid_zoom[i], chi2_zoom[i], zgrid_zoom[iz], i+1, NZOOM))
zgrid = np.append(zgrid, zgrid_zoom)
chi2 = np.append(chi2, chi2_zoom)
logpdf = np.append(logpdf, logpdf_zoom)
coeffs = np.append(coeffs, coeffs_zoom, axis=0)
covar = np.vstack((covar, covar_zoom))
so = np.argsort(zgrid)
zgrid = zgrid[so]
chi2 = chi2[so]
logpdf = logpdf[so]
coeffs = coeffs[so,:]
covar = covar[so,:,:]
fit = utils.GTable()
fit.meta['N'] = (self.N, 'Number of spectrum extensions')
fit.meta['polyord'] = (poly_order, 'Order polynomial fit')
fit.meta['chi2poly'] = (chi2_poly, 'Chi^2 of polynomial fit')
kspl = (coeffs_spline != 0).sum()
fit.meta['chi2spl'] = (chi2_spline, 'Chi^2 of spline fit')
fit.meta['kspl'] = (kspl, 'Parameters, k, of spline fit')
# Evaluate spline at wavelengths for stars
xspline = np.array([8100, 9000, 1.27e4, 1.4e4])
flux_spline = utils.eval_bspline_templates(xspline, tspline, coeffs_spline[self.N:])
fluxerr_spline = utils.eval_bspline_templates(xspline, tspline, err_spline[self.N:])
for i in range(len(xspline)):
fit.meta['splf{0:02d}'.format(i+1)] = flux_spline[i], 'Spline flux at {0:.2f} um'.format(xspline[i]/1.e4)
fit.meta['sple{0:02d}'.format(i+1)] = fluxerr_spline[i], 'Spline flux err at {0:.2f} um'.format(xspline[i]/1.e4)
izbest = np.argmin(chi2)
clip = coeffs[izbest,:] != 0
ktempl = clip.sum()
fit.meta['NTEMP'] = (len(templates), 'Number of fitting templates')
fit.meta['DoF'] = (self.DoF, 'Degrees of freedom (number of pixels)')
fit.meta['ktempl'] = (ktempl, 'Parameters, k, of template fit')
fit.meta['chimin'] = (chi2.min(), 'Minimum chi2 of template fit')
fit.meta['chimax'] = (chi2.max(), 'Maximum chi2 of template fit')
fit.meta['fitter'] = (fitter, 'Minimization algorithm')
# Bayesian information criteria, normalized to template min_chi2
# BIC = log(number of data points)*(number of params) + min(chi2) + C
# https://en.wikipedia.org/wiki/Bayesian_information_criterion
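# Worked example (hedged, illustrative numbers): with DoF = 10000 pixels,
# ln(DoF) ~= 9.2, so a 3rd-order polynomial plus N = 4 background terms
# contributes 9.2 * (3+1+4) ~= 74 to bic_poly.  The template fit is then
# preferred (bic_temp < bic_poly) when its chi2 improvement over the
# polynomial exceeds ~9.2 per extra fitted parameter.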
scale_chinu = self.DoF/chi2.min()
scale_chinu = 1 # Don't rescale
fit.meta['bic_poly'] = np.log(self.DoF)*(poly_order+1+self.N) + (chi2_poly-chi2.min())*scale_chinu, 'BIC of polynomial fit'
fit.meta['bic_spl'] = np.log(self.DoF)*kspl + (chi2_spline-chi2.min())*scale_chinu, 'BIC of spline fit'
fit.meta['bic_temp'] = np.log(self.DoF)*ktempl, 'BIC of template fit'
for i, tname in enumerate(templates):
fit.meta['T{0:03d}NAME'.format(i+1)] = (templates[tname].name, 'Template name')
if tname.startswith('line '):
fit.meta['T{0:03d}FWHM'.format(i+1)] = (templates[tname].fwhm, 'FWHM, if emission line')
dtype = np.float64
fit['zgrid'] = np.cast[dtype](zgrid)
fit['chi2'] = np.cast[dtype](chi2)
if get_student_logpdf:
fit['student_logpdf'] = np.cast[dtype](logpdf)
fit.meta['t_df'] = student_t_pars[0], 'Student-t df'
fit.meta['t_loc'] = student_t_pars[1], 'Student-t loc'
fit.meta['t_scale'] = student_t_pars[2], 'Student-t scale'
#fit['chi2poly'] = chi2_poly
fit['coeffs'] = np.cast[dtype](coeffs)
fit['covar'] = np.cast[dtype](covar)
fit = self._parse_zfit_output(fit, prior=prior)
return fit
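# Hedged usage sketch (illustrative, on a `MultiBeam`-like instance `mb`):
#
#     fit = mb.xfit_redshift(zr=[0.3, 2.5], dz=[0.004, 0.0004], fitter='nnls')
#     z_map = fit.meta['z_map'][0]          # redshift at the maximum of p(z)
#     z16, z84 = fit.meta['Z16'][0], fit.meta['Z84'][0]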
def _parse_zfit_output(self, fit, prior=None):
"""Parse best-fit redshift, etc.
TBD
"""
import scipy.interpolate
# Normalize to min(chi2)/DoF = 1.
scl_nu = fit['chi2'].min()/self.DoF
# PDF
pdf = np.exp(-0.5*(fit['chi2']-fit['chi2'].min())/scl_nu)
if prior is not None:
interp_prior = np.interp(fit['zgrid'], prior[0], prior[1])
pdf *= interp_prior
fit.meta['hasprior'] = True, 'Prior applied to PDF'
fit['prior'] = interp_prior
else:
interp_prior = None
fit.meta['hasprior'] = False, 'Prior applied to PDF'
# Normalize PDF
pdf /= np.trapz(pdf, fit['zgrid'])
# Interpolate pdf for more continuous measurement
spl = scipy.interpolate.Akima1DInterpolator(fit['zgrid'], np.log(pdf), axis=1)
zfine = utils.log_zgrid(zr=[fit['zgrid'].min(), fit['zgrid'].max()], dz=0.0001)
ok = np.isfinite(spl(zfine))
norm = np.trapz(np.exp(spl(zfine[ok])), zfine[ok])
# Compute CDF and probability intervals
dz = np.gradient(zfine[ok])
cdf = np.cumsum(np.exp(spl(zfine[ok]))*dz/norm)
pz_percentiles = np.interp(np.array([2.5, 16, 50, 84, 97.5])/100., cdf, zfine[ok])
# Random draws, testing
#rnd = np.interp(np.random.rand(1000), cdf, fit['zgrid']+dz/2.)
dz = np.gradient(fit['zgrid'])
gamma = 0.15
zsq = np.dot(fit['zgrid'][:,None], np.ones_like(fit['zgrid'])[None,:])
L = _loss((zsq-fit['zgrid'])/(1+fit['zgrid']), gamma=gamma)
risk = np.dot(pdf*L, dz)
zi = np.argmin(risk)
#print('xxx', zi, len(risk))
if (zi < len(risk)-1) & (zi > 0):
c = np.polyfit(fit['zgrid'][zi-1:zi+2], risk[zi-1:zi+2], 2)
z_risk = -c[1]/(2*c[0])
else:
z_risk = fit['zgrid'][zi]
min_risk = np.trapz(pdf*_loss((z_risk-fit['zgrid'])/(1+fit['zgrid']), gamma=gamma), fit['zgrid'])
# MAP, maximum p(z)
zi = np.argmax(pdf)
if (zi < len(pdf)-1) & (zi > 0):
c = np.polyfit(fit['zgrid'][zi-1:zi+2], pdf[zi-1:zi+2], 2)
z_map = -c[1]/(2*c[0])
else:
z_map = fit['zgrid'][zi]
# Store data in the fit table
fit['pdf'] = pdf
fit['risk'] = risk
fit.meta['Z02'] = pz_percentiles[0], 'Integrated p(z) = 0.025'
fit.meta['Z16'] = pz_percentiles[1], 'Integrated p(z) = 0.16'
fit.meta['Z50'] = pz_percentiles[2], 'Integrated p(z) = 0.5'
fit.meta['Z84'] = pz_percentiles[3], 'Integrated p(z) = 0.84'
fit.meta['Z97'] = pz_percentiles[4], 'Integrated p(z) = 0.975'
fit.meta['ZWIDTH1'] = pz_percentiles[3]-pz_percentiles[1], 'Width between the 16th and 84th p(z) percentiles'
fit.meta['ZWIDTH2'] = pz_percentiles[4]-pz_percentiles[0], 'Width between the 2.5th and 97.5th p(z) percentiles'
fit.meta['z_map'] = z_map, 'Redshift at MAX(PDF)'
fit.meta['z_risk'] = z_risk, 'Redshift at minimum risk'
fit.meta['min_risk'] = min_risk, 'Minimum risk'
fit.meta['gam_loss'] = gamma, 'Gamma factor of the risk/loss function'
return fit
def template_at_z(self, z=0, templates=None, fit_background=True, fitter='nnls', fwhm=1400, get_uncertainties=2, get_residuals=False, include_photometry=True, draws=0):
"""TBD
"""
if templates is None:
templates = utils.load_templates(line_complexes=False, fsps_templates=True, fwhm=fwhm)
out = self.xfit_at_z(z=z, templates=templates, fitter=fitter,
fit_background=fit_background,
get_uncertainties=get_uncertainties,
get_residuals=get_residuals,
include_photometry=include_photometry)
chi2, coeffs, coeffs_err, covar = out
cont1d, line1d = utils.dot_templates(coeffs[self.N:], templates, z=z,
apply_igm=(z > IGM_MINZ))
# Parse template coeffs
cfit = OrderedDict()
for i in range(self.N):
cfit['bg {0:03d}'.format(i)] = coeffs[i], coeffs_err[i]
for j, key in enumerate(templates):
i = j+self.N
cfit[key] = coeffs[i], coeffs_err[i]
if False:
# Compare drizzled and beam fits (very close)
for j, key in enumerate(templates):
print('{key:<16s} {0:.2e} {1:.2e} {2:.2e} {3:.2e}'.format(mb_cfit[key][0], mb_cfit[key][1], st_cfit[key][0], st_cfit[key][1], key=key))
tfit = OrderedDict()
tfit['cont1d'] = cont1d
tfit['line1d'] = line1d
tfit['cfit'] = cfit
tfit['coeffs'] = coeffs
tfit['chi2'] = chi2
tfit['covar'] = covar
tfit['z'] = z
tfit['templates'] = templates
if draws > 0:
xte, yte, lte = utils.array_templates(templates, max_R=5000, z=z)
err = np.sqrt(covar.diagonal())
nonzero = err > 0
cov_norm = ((covar/err).T/err)[nonzero,:][:,nonzero]
draw_coeff = np.zeros((draws, len(err)))
draw_coeff[:,nonzero] = np.random.multivariate_normal((coeffs/err)[nonzero], cov_norm, draws)*err[nonzero]
draw_spec = draw_coeff[:,self.N:].dot(yte)
err_spec = np.diff(np.percentile(draw_spec, [16,84], axis=0), axis=0).flatten()/2.
tfit['line1d_err'] = err_spec
return tfit #cont1d, line1d, cfit, covar
### Random draws
# Unique wavelengths
wfull = np.hstack([templates[key].wave for key in templates])
w = np.unique(wfull)
so = np.argsort(w)
w = w[so]
xclip = (w*(1+z) > 7000) & (w*(1+z) < 1.8e4)
temp = []
for key in templates:
if key.startswith('bspl'):
temp.append(grizli.utils_c.interp.interp_conserve_c(w[xclip]/(1+z), templates[key].wave, templates[key].flux))
else:
temp.append(grizli.utils_c.interp.interp_conserve_c(w[xclip], templates[key].wave, templates[key].flux))
temp = np.vstack(temp)
#array([) for key in templates])
clip = coeffs_err[self.N:] > 0
covar_clip = covar[self.N:,self.N:][clip,:][:,clip]
draws = np.random.multivariate_normal(coeffs[self.N:][clip], covar_clip, size=100)
tdraw = np.dot(draws, temp[clip,:])/(1+z)
for ib, beam in enumerate(self.beams):
ww, ff, ee = beam.optimal_extract(beam.sci - beam.contam - coeffs[ib])
plt.errorbar(ww, ff/beam.sens, ee/beam.sens, color='k', marker='.', linestyle='None', alpha=0.5)
for i in range(tdraw.shape[0]):
sp = [w[xclip]*(1+z), tdraw[i,:]]
m = beam.compute_model(spectrum_1d=sp, is_cgs=True, in_place=False).reshape(beam.sh)
ww, ff, ee = beam.optimal_extract(m)
plt.plot(ww, ff/beam.sens, color='r', alpha=0.05)
plt.plot(w[xclip]*(1+z), tdraw.T, alpha=0.05, color='r')
def xmake_fit_plot(self, fit, tfit, show_beams=True, bin=1, minor=0.1,
scale_on_stacked_1d=True):
"""TBD
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec
from matplotlib.ticker import MultipleLocator
import grizli.model
# Initialize plot window
Ng = len(self.grisms)
gs = matplotlib.gridspec.GridSpec(1,2,
width_ratios=[1,1.5+0.5*(Ng>1)],
hspace=0.)
fig = plt.figure(figsize=[8+4*(Ng>1), 3.5])
# p(z)
axz = fig.add_subplot(gs[-1,0]) #121)
axz.text(0.95, 0.96, self.group_name + '\n'+'ID={0:<5d} z={1:.4f}'.format(self.id, fit.meta['z_map'][0]), ha='right', va='top', transform=axz.transAxes, fontsize=9)
axz.plot(fit['zgrid'], np.log10(fit['pdf']), color='k')
#axz.fill_between(z, (chi2-chi2.min())/scale_nu, 27, color='k', alpha=0.5)
axz.set_xlabel(r'$z$')
axz.set_ylabel(r'$\log\ p(z)$'+' / '+ r'$\chi^2=\frac{{{0:.0f}}}{{{1:d}}}={2:.2f}$'.format(fit.meta['chimin'][0], fit.meta['DoF'][0], fit.meta['chimin'][0]/fit.meta['DoF'][0]))
#axz.set_yticks([1,4,9,16,25])
axz.set_xlim(fit['zgrid'].min(), fit['zgrid'].max())
pzmax = np.log10(fit['pdf'].max())
axz.set_ylim(pzmax-6, pzmax+0.8)
axz.grid()
axz.yaxis.set_major_locator(MultipleLocator(base=1))
#### Spectra
axc = fig.add_subplot(gs[-1,1]) #224)
self.oned_figure(bin=bin, show_beams=show_beams, minor=minor, tfit=tfit, axc=axc, scale_on_stacked=scale_on_stacked_1d)
gs.tight_layout(fig, pad=0.1, w_pad=0.1)
return fig
def process_zfit(self, zgrid, chi2, prior=None):
"""Parse redshift fit"""
zbest = zgrid[np.argmin(chi2)]
###############
if prior is not None:
#print('\n\nPrior!\n\n', chi2.min(), prior[1].min())
interp_prior = np.interp(zgrid, prior[0], prior[1])
chi2 += interp_prior
else:
interp_prior = None
print(' Zoom iteration: z_best={0:.4f}\n'.format(zgrid[np.argmin(chi2)]))
### Best redshift
if not stars:
templates = utils.load_templates(line_complexes=False, fwhm=fwhm, fsps_templates=fsps_templates)
zbest = zgrid[np.argmin(chi2)]
ix = np.argmin(chi2)
chibest = chi2.min()
## Fit parabola
if (ix > 0) & (ix < len(chi2)-1):
c = polyfit(zgrid[ix-1:ix+2], chi2[ix-1:ix+2], 2)
zbest = -c[1]/(2*c[0])
chibest = polyval(c, zbest)
out = self.fit_at_z(z=zbest, templates=templates,
fitter=fitter, poly_order=poly_order,
fit_background=fit_background)
A, coeffs_full, chi2_best, model_full = out
# Parse results
out2 = self.parse_fit_outputs(zbest, templates, coeffs_full, A)
line_flux, covar, cont1d, line1d, model1d, model_continuum = out2
# Output dictionary with fit parameters
fit_data = OrderedDict()
fit_data['poly_order'] = poly_order
fit_data['fwhm'] = fwhm
fit_data['zbest'] = zbest
fit_data['chibest'] = chibest
fit_data['chi_poly'] = chi2_poly
fit_data['zgrid'] = zgrid
fit_data['prior'] = interp_prior
fit_data['A'] = A
fit_data['coeffs'] = coeffs
fit_data['chi2'] = chi2
fit_data['DoF'] = self.DoF
fit_data['model_full'] = model_full
fit_data['coeffs_full'] = coeffs_full
fit_data['covar'] = covar
fit_data['line_flux'] = line_flux
#fit_data['templates_full'] = templates
fit_data['model_cont'] = model_continuum
fit_data['model1d'] = model1d
fit_data['cont1d'] = cont1d
fit_data['line1d'] = line1d
def scale_to_photometry(self, tfit=None, tol=1.e-4, order=0, init=None, fit_background=True, Rspline=50, use_fit=True, **kwargs):
"""Compute scale factor between spectra and photometry
method : 'Powell' or 'BFGS' work well, latter a bit faster but less robust
New implementation of Levenberg-Markwardt minimization
TBD
"""
from scipy.optimize import minimize, least_squares
if self.Nphot == 0:
return np.array([10.])
if (tfit is None) & (fit_background):
wspline = np.arange(4200, 2.5e4)
#Rspline = 50
df_spl = len(utils.log_zgrid(zr=[wspline[0], wspline[-1]], dz=1./Rspline))
tspline = utils.bspline_templates(wspline, df=df_spl+2, log=True, clip=0.0001)
tfit = self.template_at_z(z=0, templates=tspline, include_photometry=False, fit_background=fit_background, draws=1000)
if use_fit:
oned = self.oned_spectrum(tfit=tfit)
wmi = np.min([oned[k]['wave'].min() for k in oned])
wma = np.max([oned[k]['wave'].max() for k in oned])
clip = (tfit['line1d'].wave > wmi) & (tfit['line1d'].wave < wma) & (tfit['line1d_err'] > 0)
spl_temp = utils.SpectrumTemplate(wave=tfit['line1d'].wave[clip], flux=tfit['line1d'].flux[clip], err=tfit['line1d_err'][clip])
args = (self, {'spl':spl_temp})
else:
oned = self.oned_spectrum(tfit=tfit)
args = (self, oned)
if init is None:
init = np.zeros(order+1)
init[0] = 10.
scale_fit = least_squares(self._objective_scale_direct, init, jac='2-point', method='lm', ftol=tol, xtol=tol, gtol=tol, x_scale=1.0, loss='linear', f_scale=1.0, diff_step=None, tr_solver=None, tr_options={}, jac_sparsity=None, max_nfev=None, verbose=0, args=args, kwargs={})
# pscale = scale_fit.x
return scale_fit
def _old_scale_to_photometry(self, z=0, templates={}, tol=1.e-4, order=0, init=None, method='lm', fit_background=True):
"""Compute scale factor between spectra and photometry
method : 'Powell' or 'BFGS' work well, latter a bit faster but less robust
        New implementation based on Levenberg-Marquardt minimization
TBD
"""
from scipy.optimize import minimize, least_squares
if self.Nphot == 0:
return np.array([10.])
AxT, data = self.xfit_at_z(z=z, templates=templates, fitter='nnls',
fit_background=fit_background,
get_uncertainties=False,
get_design_matrix=True)
if init is None:
init = np.zeros(order+1)
init[0] = 10.
if method == 'lm':
scale_fit = least_squares(self.objfun_scale, init, jac='2-point', method='lm', ftol=tol, xtol=tol, gtol=tol, x_scale=1.0, loss='linear', f_scale=1.0, diff_step=None, tr_solver=None, tr_options={}, jac_sparsity=None, max_nfev=None, verbose=0, args=(AxT, data, self, 'resid'), kwargs={})
else:
scale_fit = minimize(self.objfun_scale, init, args=(AxT, data, self, 'chi2'), method=method, jac=None, hess=None, hessp=None, tol=tol, callback=None, options=None)
# pscale = scale_fit.x
return scale_fit
@staticmethod
def compute_scale_array(pscale, wave):
"""Return the scale array given the input coefficients
TBD
"""
N = len(pscale)
rescale = 10**(np.arange(N)+1)
return np.polyval((pscale/rescale)[::-1], (wave-1.e4)/1000.)
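    # Illustrative check of the parameterization: for pscale = [10., 0.] the
    # rescaled coefficients become [10/10, 0/100] = [1, 0], so the scale is
    # identically 1.0 at all wavelengths (which is why the scale fits
    # initialize with init[0] = 10.).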
@staticmethod
def objfun_scale(pscale, AxT, data, self, retval):
"""
Objective function for fitting for a scale term between photometry and
spectra
"""
import scipy.optimize
from scipy import polyval
scale = self.compute_scale_array(pscale, self.wavef[self.fit_mask])
scale[-self.Nphot:] = 1.
Ax = (AxT.T*scale)
# Remove scaling from background component
for i in range(self.N):
Ax[i,:] /= scale
coeffs, rnorm = scipy.optimize.nnls(Ax.T, data)
#coeffs, rnorm, rank, s = np.linalg.lstsq(Ax.T, data)
full = np.dot(coeffs, Ax)
resid = data - full# - background
chi2 = np.sum(resid**2*self.weightf[self.fit_mask])
print('{0} {1:.1f}'.format(' '.join(['{0:6.2f}'.format(p) for p in pscale]), chi2))
if retval == 'resid':
return resid*np.sqrt(self.weightf[self.fit_mask])
if retval == 'coeffs':
return coeffs, full, resid, chi2, AxT
else:
return chi2
@staticmethod
def _objective_scale_direct(pscale, self, oned):
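        # Objective for the scale fit: synthesize photometry from the scaled 1D
        # spectra through each covered filter and return the per-band chi-squared
        # terms against the observed fluxes.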
from eazy.filters import FilterDefinition
flam = []
eflam = []
spec_flux = []
filters = []
for filt in self.photom_filters:
clip = filt.throughput > 0.001*filt.throughput.max()
filters.append(FilterDefinition(name=filt.name,
wave=filt.wave[clip],
throughput=filt.throughput[clip]))
filters = np.array(filters)
lc = self.photom_pivot
for k in oned:
#spec, okfilt, lc = spec1d[k]
# Covered filters
if isinstance(oned[k], utils.SpectrumTemplate):
spec1 = utils.SpectrumTemplate(wave=oned[k].wave, flux=3.e18/oned[k].wave**2)
else:
spec1 = utils.SpectrumTemplate(wave=oned[k]['wave'], flux=3.e18/oned[k]['wave']**2)
flux1 = np.array([spec1.integrate_filter(filt, use_wave='filter') for filt in filters])
okfilt = flux1 > 0.98
if okfilt.sum() == 0:
#print('scale_to_photometry: no filters overlap '+k)
continue
if isinstance(oned[k], utils.SpectrumTemplate):
scale = 1./self.compute_scale_array(pscale, oned[k].wave)
spec = utils.SpectrumTemplate(wave=oned[k].wave, flux=oned[k].flux*scale, err=oned[k].err*scale)
else:
scale = 1./self.compute_scale_array(pscale, oned[k]['wave'])
spec = utils.SpectrumTemplate(wave=oned[k]['wave'], flux=oned[k]['flux']*scale/np.maximum(oned[k]['flat'], 1), err=oned[k]['err']*scale/np.maximum(oned[k]['flat'], 1))
spec_flux.append((np.array([spec.integrate_filter(filt, use_wave='templ') for filt in filters[okfilt]]).T*3.e18/lc[okfilt]**2).T)
flam.append(self.photom_flam[okfilt])
eflam.append(self.photom_eflam[okfilt])
if not flam:
return [0]
spec_flux = np.vstack(spec_flux)
flam = np.hstack(flam)
eflam = np.hstack(eflam)
chi2 = (flam-spec_flux[:,0])**2/(eflam**2+spec_flux[:,1]**2)
#print(pscale, chi2.sum())
return chi2
def xfit_star(self, tstar=None, spline_correction=True, fitter='nnls', fit_background=True, spline_args={'Rspline':5}, oned_args={}):
"""Fit stellar templates
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec
from matplotlib.ticker import MultipleLocator
#self = grizli.multifit.MultiBeam('ers-grism_{0:05d}.beams.fits'.format(id), fcontam=0.2, psf=True)
#self.extend(grizli.multifit.MultiBeam('/Volumes/Pegasus/Grizli/ACS/goodss/Prep/ers-grism-pears_{0:05d}.beams.fits'.format(id), fcontam=0.2))
if tstar is None:
tstar = utils.load_templates(fwhm=1200, line_complexes=True, fsps_templates=True, stars=True)
NTEMP = len(tstar)
#covar = np.zeros((NTEMP, self.N+1, self.N+1))
#coeffs = np.zeros((NTEMP, self.N+1))
        chi2 = np.zeros(NTEMP)
# Class definitions for equations to be tested
# from path import *
import numpy as np
import centpy
###############
# 1D equations
###############
# Burgers equation
class Burgers1d(centpy.Equation1d):
def initial_data(self):
return np.sin(self.x) + 0.5 * np.sin(0.5 * self.x)
def boundary_conditions(self, u):
u[0] = u[-4]
u[1] = u[-3]
u[-2] = u[2]
u[-1] = u[3]
def flux_x(self, u):
return 0.5 * u * u
def spectral_radius_x(self, u):
return np.abs(u)
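# Illustrative usage sketch (not part of the original file; the Pars1d argument
# names are assumptions based on typical centpy examples and may differ):
#
#   pars = centpy.Pars1d(x_init=0.0, x_final=2 * np.pi, t_final=5.0,
#                        dt_out=0.05, J=400, cfl=0.75, scheme="sd3")
#   soln = centpy.Solver1d(Burgers1d(pars))
#   soln.solve()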
# Euler equation
class Euler1d(centpy.Equation1d):
def initial_data(self):
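        # Sod shock-tube initial data: (rho, rho*u, E) = (1, 0, 1/(gamma-1)) on
        # the left of the midpoint and (0.125, 0, 0.1/(gamma-1)) on the right.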
u = np.zeros((self.J + 4, 3))
midpoint = int(self.J / 2) + 2
left_v = [1, 0, 1.0 / (self.gamma - 1.0)]
right_v = [0.125, 0.0, 0.1 / (self.gamma - 1.0)]
# Left side
u[:midpoint, :] = left_v
# Right side
u[midpoint:, :] = right_v
return u
def boundary_conditions(self, u):
left_v = [1, 0, 1.0 / (self.gamma - 1.0)]
right_v = [0.125, 0.0, 0.1 / (self.gamma - 1.0)]
# Left side
u[0] = left_v
u[1] = left_v
# Right side
u[-1] = right_v
u[-2] = right_v
def flux_x(self, u):
f = np.zeros_like(u)
rho = u[:, 0]
u_x = u[:, 1] / rho
E = u[:, 2]
p = (self.gamma - 1.0) * (E - 0.5 * rho * u_x ** 2)
f[:, 0] = rho * u_x
f[:, 1] = rho * u_x ** 2 + p
f[:, 2] = u_x * (E + p)
return f
def spectral_radius_x(self, u):
rho = u[:, 0]
u_x = u[:, 1] / rho
p = (self.gamma - 1.0) * (u[:, 2] - 0.5 * rho * u_x ** 2)
return np.abs(u_x) + np.sqrt(self.gamma * p / rho)
# MHD equation
class MHD1d(centpy.Equation1d):
def pressure(self, u):
return (
u[:, 6]
- 0.5 * ((u[:, 1] ** 2 + u[:, 2] ** 2 + u[:, 3] ** 2) / u[:, 0])
- 0.5 * (self.B1 ** 2 + u[:, 4] ** 2 + u[:, 5] ** 2)
)
def initial_data(self):
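        # Brio-Wu-type MHD shock tube: the total energy includes the magnetic
        # term 0.5*(B1**2 + By**2) = 25/32, consistent with B1 = 0.75 and By = +/-1.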
u = np.zeros((self.J + 4, 7))
midpoint = int(self.J / 2) + 2
# Left side
u[:midpoint, 0] = 1.0
u[:midpoint, 1] = 0.0
u[:midpoint, 2] = 0.0
u[:midpoint, 3] = 0.0
u[:midpoint, 4] = 1.0
u[:midpoint, 5] = 0.0
u[:midpoint, 6] = 1.0 + 25.0 / 32.0
# Right side
u[midpoint:, 0] = 0.125
u[midpoint:, 1] = 0.0
u[midpoint:, 2] = 0.0
u[midpoint:, 3] = 0.0
u[midpoint:, 4] = -1.0
u[midpoint:, 5] = 0.0
u[midpoint:, 6] = 0.1 + 25.0 / 32.0
return u
def boundary_conditions(self, u):
left_v = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0 + 25.0 / 32.0]
right_v = [0.125, 0.0, 0.0, 0.0, -1.0, 0.0, 0.1 + 25.0 / 32]
if self.odd:
u[0] = left_v
u[-1] = right_v
u[-2] = right_v
else:
u[0] = left_v
u[1] = left_v
u[-1] = right_v
def flux_x(self, u):
f = np.zeros_like(u)
B1 = self.B1
p_star = self.pressure(u) + 0.5 * (B1 ** 2 + u[:, 4] ** 2 + u[:, 5] ** 2)
f[:, 0] = u[:, 1]
f[:, 1] = u[:, 1] ** 2 / u[:, 0] + p_star
f[:, 2] = u[:, 1] * u[:, 2] / u[:, 0] - B1 * u[:, 4]
f[:, 3] = u[:, 1] * u[:, 3] / u[:, 0] - B1 * u[:, 5]
f[:, 4] = u[:, 1] * u[:, 4] / u[:, 0] - B1 * u[:, 2] / u[:, 0]
f[:, 5] = u[:, 1] * u[:, 5] / u[:, 0] - B1 * u[:, 3] / u[:, 0]
f[:, 6] = (u[:, 6] + p_star) * (u[:, 1] / u[:, 0]) - B1 * (
B1 * u[:, 1] + u[:, 2] * u[:, 4] + u[:, 3] * u[:, 5]
) / u[:, 0]
return f
def spectral_radius_x(self, u):
rho = u[:, 0]
u_x = u[:, 1] / rho
p = self.pressure(u)
A = 2.0 * p / rho
B = (self.B1 ** 2 + u[:, 4] ** 2 + u[:, 5] ** 2) / rho
cf = np.sqrt(
0.5 * (A + B + np.sqrt((A + B) ** 2 - 4.0 * A * self.B1 ** 2 / rho))
)
return np.abs(u_x) + cf
###############
# 2D equations
###############
# Scalar equation
class Scalar2d(centpy.Equation2d):
def initial_data(self):
return np.sin(self.xx.T + 0.5) * np.cos(2 * self.xx.T + self.yy.T)
def boundary_conditions(self, u):
# x-boundary
u[0] = u[-4]
u[1] = u[-3]
u[-2] = u[2]
u[-1] = u[3]
# y-boundary
u[:, 0] = u[:, -4]
u[:, 1] = u[:, -3]
u[:, -2] = u[:, 2]
u[:, -1] = u[:, 3]
def flux_x(self, u):
return np.sin(u)
def flux_y(self, u):
return 1.0 / 3.0 * u ** 3
def spectral_radius_x(self, u):
return np.abs(np.cos(u))
def spectral_radius_y(self, u):
return u**2
# Euler equation
class Euler2d(centpy.Equation2d):
# Helper functions and definitions for the equation
def pressure(self, u):
return (self.gamma - 1.0) * (
u[:, :, 3] - 0.5 * (u[:, :, 1] ** 2 + u[:, :, 2] ** 2) / u[:, :, 0]
)
def euler_data(self):
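        # Four constant states, one per quadrant, defining a 2D Riemann problem.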
gamma = self.gamma
p_one = 1.5
p_two = 0.3
p_three = 0.029
p_four = 0.3
upper_right, upper_left, lower_right, lower_left = np.ones((4, 4))
upper_right[0] = 1.5
upper_right[1] = 0.0
upper_right[2] = 0.0
upper_right[3] = (
p_one / (gamma - 1.0)
+ 0.5 * (upper_right[1] ** 2 + upper_right[2] ** 2) / upper_right[0]
)
upper_left[0] = 0.5323
upper_left[1] = 1.206 * upper_left[0]
upper_left[2] = 0.0
upper_left[3] = (
p_two / (gamma - 1.0)
+ 0.5 * (upper_left[1] ** 2 + upper_left[2] ** 2) / upper_left[0]
)
lower_right[0] = 0.5323
lower_right[1] = 0.0
lower_right[2] = 1.206 * lower_right[0]
lower_right[3] = (
p_four / (gamma - 1.0)
+ 0.5 * (lower_right[1] ** 2 + lower_right[2] ** 2) / lower_right[0]
)
lower_left[0] = 0.138
lower_left[1] = 1.206 * lower_left[0]
lower_left[2] = 1.206 * lower_left[0]
lower_left[3] = (
p_three / (gamma - 1.0)
+ 0.5 * (lower_left[1] ** 2 + lower_left[2] ** 2) / lower_left[0]
)
return upper_right, upper_left, lower_right, lower_left
# Abstract class equation definitions
def initial_data(self):
u = np.empty((self.J + 4, self.K + 4, 4))
midJ = int(self.J / 2) + 2
midK = int(self.K / 2) + 2
one_matrix = np.ones(u[midJ:, midK:].shape)
upper_right, upper_left, lower_right, lower_left = self.euler_data()
u[midJ:, midK:] = upper_right * one_matrix
u[:midJ, midK:] = upper_left * one_matrix
u[midJ:, :midK] = lower_right * one_matrix
u[:midJ, :midK] = lower_left * one_matrix
return u
def boundary_conditions(self, u):
upper_right, upper_left, lower_right, lower_left = self.euler_data()
if self.odd:
j = slice(1, -2)
u[j, 0] = u[j, 1]
u[j, -2] = u[j, -3]
u[j, -1] = u[j, -3]
u[0, j] = u[1, j]
u[-2, j] = u[-3, j]
u[-1, j] = u[-3, j]
# one
u[-2, -2] = upper_right
u[-1, -2] = upper_right
u[-2, -1] = upper_right
u[-1, -1] = upper_right
# two
u[0, -2] = upper_left
u[0, -1] = upper_left
# three
u[0, 0] = lower_left
u[0, 1] = lower_left
u[1, 0] = lower_left
u[1, 1] = lower_left
# four
u[-2, 0] = lower_right
u[-1, 0] = lower_right
u[-2, 1] = lower_right
u[-1, 1] = lower_right
else:
j = slice(2, -1)
u[j, 0] = u[j, 2]
u[j, 1] = u[j, 2]
u[j, -1] = u[j, -2]
u[0, j] = u[2, j]
u[1, j] = u[2, j]
u[-1, j] = u[-2, j]
# one
u[-1, -2] = upper_right
u[-1, -1] = upper_right
# two
u[0, -2] = upper_left
u[0, -1] = upper_left
u[1, -2] = upper_left
u[1, -1] = upper_left
# three
u[0, 0] = lower_left
u[0, 1] = lower_left
u[1, 0] = lower_left
u[1, 1] = lower_left
# four
u[-1, 0] = lower_right
u[-1, 1] = lower_right
def flux_x(self, u):
f = np.empty_like(u)
p = self.pressure(u)
f[:, :, 0] = u[:, :, 1]
f[:, :, 1] = u[:, :, 1] ** 2 / u[:, :, 0] + p
f[:, :, 2] = u[:, :, 1] * u[:, :, 2] / u[:, :, 0]
f[:, :, 3] = (u[:, :, 3] + p) * u[:, :, 1] / u[:, :, 0]
return f
def flux_y(self, u):
g = np.empty_like(u)
p = self.pressure(u)
g[:, :, 0] = u[:, :, 2]
g[:, :, 1] = u[:, :, 1] * u[:, :, 2] / u[:, :, 0]
g[:, :, 2] = u[:, :, 2] ** 2 / u[:, :, 0] + p
g[:, :, 3] = (u[:, :, 3] + p) * u[:, :, 2] / u[:, :, 0]
return g
def spectral_radius_x(self, u):
j0 = centpy._helpers.j0
rho = u[j0, j0, 0]
vx = u[j0, j0, 1] / rho
vy = u[j0, j0, 2] / rho
p = (self.gamma - 1.0) * (u[j0, j0, 3] - 0.5 * rho * (vx ** 2 + vy ** 2))
c = np.sqrt(self.gamma * p / rho)
return np.abs(vx) + c
def spectral_radius_y(self, u):
j0 = centpy._helpers.j0
rho = u[j0, j0, 0]
vx = u[j0, j0, 1] / rho
vy = u[j0, j0, 2] / rho
p = (self.gamma - 1.0) * (u[j0, j0, 3] - 0.5 * rho * (vx ** 2 + vy ** 2))
        c = np.sqrt(self.gamma * p / rho)
        return np.abs(vy) + c
import argparse
import os
import json
import numpy as np
import PIL.Image as Image
import xml.etree.ElementTree as ET
from simplification.cutil import simplify_coords
from skimage import measure
def convert_mask_to_polygon(
mask,
max_polygon_points=100,
score_threshold=0.5,
max_refinement_iterations=25,
edge_safety_padding=1,
):
"""Convert a numpy mask to a polygon outline in normalized coordinates.
    :param mask: Pixel mask, where each pixel has an object (float) score in [0, 1], with shape [1, height, width]
:type: mask: <class 'numpy.array'>
:param max_polygon_points: Maximum number of (x, y) coordinate pairs in polygon
:type: max_polygon_points: Int
:param score_threshold: Score cutoff for considering a pixel as in object.
:type: score_threshold: Float
:param max_refinement_iterations: Maximum number of times to refine the polygon
trying to reduce the number of pixels to meet max polygon points.
:type: max_refinement_iterations: Int
:param edge_safety_padding: Number of pixels to pad the mask with
:type edge_safety_padding: Int
:return: normalized polygon coordinates
:rtype: list of list
"""
# Convert to numpy bitmask
mask = mask[0]
mask_array = np.array((mask > score_threshold), dtype=np.uint8)
image_shape = mask_array.shape
# Pad the mask to avoid errors at the edge of the mask
embedded_mask = np.zeros(
(
image_shape[0] + 2 * edge_safety_padding,
image_shape[1] + 2 * edge_safety_padding,
),
dtype=np.uint8,
)
embedded_mask[
edge_safety_padding : image_shape[0] + edge_safety_padding,
edge_safety_padding : image_shape[1] + edge_safety_padding,
] = mask_array
# Find Image Contours
contours = measure.find_contours(embedded_mask, 0.5)
simplified_contours = []
for contour in contours:
# Iteratively reduce polygon points, if necessary
if max_polygon_points is not None:
simplify_factor = 0
while (
len(contour) > max_polygon_points
and simplify_factor < max_refinement_iterations
):
contour = simplify_coords(contour, simplify_factor)
simplify_factor += 1
# Convert to [x, y, x, y, ....] coordinates and correct for padding
unwrapped_contour = [0] * (2 * len(contour))
        unwrapped_contour[::2] = np.ceil(contour[:, 1]) - edge_safety_padding
import numpy as np
import matplotlib.pyplot as plt
from randomWalk_setup import get_episode_randomWalk19
from TD import TD_n
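# True state values of the 19-state random walk: evenly spaced from -0.9 to 0.9.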
v_true = np.arange(-9,10) / 10
random_policy = np.ones([21,2]) / 2
initial_v = np.zeros(21)
# coding: utf-8
import os
import sys
import json
import numpy as np
import torch
import scipy.sparse
from nltk.tokenize import TweetTokenizer
from allennlp.modules.elmo import Elmo, batch_to_ids
# from allennlp.commands.elmo import ElmoEmbedder
from hyperpara import *
# Initialization for Tokenizer and Elmo Embedder
tokenize = TweetTokenizer().tokenize
# Setting for Elmo Embedder - CHANGE THE PATH
options_file = '/afs/inf.ed.ac.uk/user/s20/s2041332/mlp_project/src/elmo_2x4096_512_2048cnn_2xhighway_options.json'
weight_file = '/afs/inf.ed.ac.uk/user/s20/s2041332/mlp_project/src/elmo_2x4096_512_2048cnn_2xhighway_weights'
ee = Elmo(options_file, weight_file, 3, dropout=0)
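# `check` tests whether the candidate tokens `c` match the support sentence `s`
# starting at word index `wi` (case-insensitive); `ind` expands a match into
# [sentence_index, word_index, candidate_index] triples, one per candidate token.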
def check(s, wi, c):
return sum([s[wi + j].lower() == c_ for j, c_ in enumerate(c) if wi + j < len(s)]) == len(c)
def ind(si, wi, ci, c):
return [[si, wi + i, ci] for i in range(len(c))]
def next_batch(data_mb):
for d in data_mb:
d['query'] = tokenize(d['query'])
d['candidates_orig'] = list(d['candidates'])
d['candidates'] = [tokenize(c) for c in d['candidates']]
d['supports'] = [tokenize(s) for s in d['supports']]
mask = [[ind(si, wi, ci, c) for wi, w in enumerate(s) for ci, c in enumerate(d['candidates'])
if check(s, wi, c)] for si, s in enumerate(d['supports'])]
nodes_id_name = []
c = 0
for e in [[[x[-1] for x in c][0] for c in s] for s in mask]:
u = []
for f in e:
u.append((c, f))
c +=1
nodes_id_name.append(u)
d['nodes_candidates_id'] = [[x[-1] for x in f][0] for e in mask for f in e]
edges_in, edges_out = [], []
for e0 in nodes_id_name:
for f0, w0 in e0:
for f1, w1 in e0:
if f0 != f1:
edges_in.append((f0, f1))
for e1 in nodes_id_name:
for f1, w1 in e1:
if e0 !=e1 and w0 == w1:
edges_out.append((f0, f1))
d['edges_in'] = edges_in
d['edges_out'] = edges_out
mask_ = [[x[:-1] for x in f] for e in mask for f in e]
# Note: the output shape of ELMo:
# AllenNLP 0.9 (original paper): ee.batch_to_embeddings: (batch_size, 3, num_timesteps, 1024)
# AllenNLP 2.0 (current version): ee(supports_ids)['elmo_representations']: [(batch_size, timesteps, embedding_dim), (batch_size, timesteps, embedding_dim), (batch_size, timesteps, embedding_dim)]
# print(len(np.array(d['supports']))) # num_sentence * len_sentence
supports_ids = batch_to_ids(d['supports']) # padding operation
# print(supports_ids.shape) # (8, 147, 50) - (batchsize, max sentence length, max word length)
candidates = ee(supports_ids)['elmo_representations'] # [(batch_size, timesteps, embedding_dim) * 3]
candidates = torch.stack(candidates) # (3, batch_size, timesteps, embedding_dim)
candidates = candidates.data.cpu().numpy().transpose((1,0,2,3)) # align with the 0.9 allenNLP
d['nodes_elmo'] = [(candidates.transpose((0, 2, 1, 3))[np.array(m).T.tolist()]).astype(np.float16)
for m in mask_]
query_ids = batch_to_ids(d['query']) # padding operation
query = ee(query_ids)['elmo_representations']
query = torch.stack(query)
query = query.data.cpu().numpy().transpose((1,0,2,3))
d['query_elmo'] = (query.transpose((0, 2, 1, 3))).astype(np.float16)[0]
id_mb = [d['id'] for d in data_mb]
candidates_mb = [d['candidates_orig'] for d in data_mb]
filt = lambda c: np.array([c[:,0].mean(0), c[-1,1], c[0,2]])
nodes_mb = np.array([np.pad(np.array([filt(c) for c in d['nodes_elmo']]),
((0, max_nodes - len(d['nodes_candidates_id'])), (0, 0), (0, 0)),
mode='constant')
for d in data_mb])
nodes_length_mb = np.stack([len(d['nodes_candidates_id']) for d in data_mb] , 0)
query_mb = np.stack([np.pad(d['query_elmo'],
((0, max_query_size - d['query_elmo'].shape[0]), (0, 0), (0, 0)),
mode='constant')
for d in data_mb], 0)
query_length_mb = np.stack([d['query_elmo'].shape[0] for d in data_mb], 0)
adj_mb = []
for d in data_mb:
adj_ = []
if len(d['edges_in']) == 0:
            adj_.append(np.zeros((max_nodes, max_nodes)))
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from distutils.version import LooseVersion
import unittest
import numpy as np
import pandas as pd
from databricks import koalas as ks
from databricks.koalas.exceptions import SparkPandasIndexingError
from databricks.koalas.testing.utils import ComparisonTestBase, ReusedSQLTestCase, compare_both
class BasicIndexingTest(ComparisonTestBase):
@property
def pdf(self):
return pd.DataFrame(
{"month": [1, 4, 7, 10], "year": [2012, 2014, 2013, 2014], "sale": [55, 40, 84, 31]}
)
@compare_both(almost=False)
def test_indexing(self, df):
df1 = df.set_index("month")
yield df1
yield df.set_index("month", drop=False)
yield df.set_index("month", append=True)
yield df.set_index(["year", "month"])
yield df.set_index(["year", "month"], drop=False)
yield df.set_index(["year", "month"], append=True)
yield df1.set_index("year", drop=False, append=True)
df2 = df1.copy()
df2.set_index("year", append=True, inplace=True)
yield df2
self.assertRaisesRegex(KeyError, "unknown", lambda: df.set_index("unknown"))
self.assertRaisesRegex(KeyError, "unknown", lambda: df.set_index(["month", "unknown"]))
for d in [df, df1, df2]:
yield d.reset_index()
yield d.reset_index(drop=True)
yield df1.reset_index(level=0)
yield df2.reset_index(level=1)
yield df2.reset_index(level=[1, 0])
yield df1.reset_index(level="month")
yield df2.reset_index(level="year")
yield df2.reset_index(level=["month", "year"])
yield df2.reset_index(level="month", drop=True)
yield df2.reset_index(level=["month", "year"], drop=True)
self.assertRaisesRegex(
IndexError,
"Too many levels: Index has only 1 level, not 3",
lambda: df1.reset_index(level=2),
)
self.assertRaisesRegex(
IndexError,
"Too many levels: Index has only 1 level, not 4",
lambda: df1.reset_index(level=[3, 2]),
)
self.assertRaisesRegex(KeyError, "unknown.*month", lambda: df1.reset_index(level="unknown"))
self.assertRaisesRegex(
KeyError, "Level unknown not found", lambda: df2.reset_index(level="unknown")
)
df3 = df2.copy()
df3.reset_index(inplace=True)
yield df3
yield df1.sale.reset_index()
yield df1.sale.reset_index(level=0)
yield df2.sale.reset_index(level=[1, 0])
yield df1.sale.reset_index(drop=True)
yield df1.sale.reset_index(name="s")
yield df1.sale.reset_index(name="s", drop=True)
s = df1.sale
self.assertRaisesRegex(
TypeError,
"Cannot reset_index inplace on a Series to create a DataFrame",
lambda: s.reset_index(inplace=True),
)
s.reset_index(drop=True, inplace=True)
yield s
yield df1
# multi-index columns
df4 = df.copy()
df4.columns = pd.MultiIndex.from_tuples(
[("cal", "month"), ("cal", "year"), ("num", "sale")]
)
df5 = df4.set_index(("cal", "month"))
yield df5
yield df4.set_index([("cal", "month"), ("num", "sale")])
self.assertRaises(KeyError, lambda: df5.reset_index(level=("cal", "month")))
yield df5.reset_index(level=[("cal", "month")])
# non-string names
df6 = df.copy()
df6.columns = [10.0, 20.0, 30.0]
df7 = df6.set_index(10.0)
yield df7
yield df6.set_index([10.0, 30.0])
yield df7.reset_index(level=10.0)
yield df7.reset_index(level=[10.0])
df8 = df.copy()
df8.columns = pd.MultiIndex.from_tuples([(10, "month"), (10, "year"), (20, "sale")])
df9 = df8.set_index((10, "month"))
yield df9
yield df8.set_index([(10, "month"), (20, "sale")])
yield df9.reset_index(level=[(10, "month")])
def test_from_pandas_with_explicit_index(self):
pdf = self.pdf
df1 = ks.from_pandas(pdf.set_index("month"))
self.assertPandasEqual(df1.to_pandas(), pdf.set_index("month"))
df2 = ks.from_pandas(pdf.set_index(["year", "month"]))
self.assertPandasEqual(df2.to_pandas(), pdf.set_index(["year", "month"]))
def test_limitations(self):
df = self.kdf.set_index("month")
self.assertRaisesRegex(
ValueError,
"Level should be all int or all string.",
lambda: df.reset_index([1, "month"]),
)
class IndexingTest(ReusedSQLTestCase):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf(self):
return ks.from_pandas(self.pdf)
@property
def pdf2(self):
return pd.DataFrame(
{0: [1, 2, 3, 4, 5, 6, 7, 8, 9], 1: [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf2(self):
return ks.from_pandas(self.pdf2)
def test_at(self):
pdf = self.pdf
kdf = self.kdf
# Create the equivalent of pdf.loc[3] as a Koalas Series
# This is necessary because .loc[n] does not currently work with Koalas DataFrames (#383)
test_series = ks.Series([3, 6], index=["a", "b"], name="3")
# Assert invalided signatures raise TypeError
with self.assertRaises(TypeError, msg="Use DataFrame.at like .at[row_index, column_name]"):
kdf.at[3]
with self.assertRaises(TypeError, msg="Use DataFrame.at like .at[row_index, column_name]"):
kdf.at["ab"] # 'ab' is of length 2 but str type instead of tuple
with self.assertRaises(TypeError, msg="Use Series.at like .at[column_name]"):
test_series.at[3, "b"]
# Assert .at for DataFrames
self.assertEqual(kdf.at[3, "b"], 6)
self.assertEqual(kdf.at[3, "b"], pdf.at[3, "b"])
self.assert_eq(kdf.at[9, "b"], np.array([0, 0, 0]))
self.assert_eq(kdf.at[9, "b"], pdf.at[9, "b"])
# Assert .at for Series
self.assertEqual(test_series.at["b"], 6)
self.assertEqual(test_series.at["b"], pdf.loc[3].at["b"])
# Assert multi-character indices
self.assertEqual(
ks.Series([0, 1], index=["ab", "cd"]).at["ab"],
pd.Series([0, 1], index=["ab", "cd"]).at["ab"],
)
# Assert invalid column or index names result in a KeyError like with pandas
with self.assertRaises(KeyError, msg="x"):
kdf.at[3, "x"]
with self.assertRaises(KeyError, msg=99):
kdf.at[99, "b"]
with self.assertRaises(ValueError):
kdf.at[(3, 6), "b"]
with self.assertRaises(KeyError):
kdf.at[3, ("x", "b")]
# Assert setting values fails
with self.assertRaises(TypeError):
kdf.at[3, "b"] = 10
# non-string column names
pdf = self.pdf2
kdf = self.kdf2
# Assert .at for DataFrames
self.assertEqual(kdf.at[3, 1], 6)
self.assertEqual(kdf.at[3, 1], pdf.at[3, 1])
self.assert_eq(kdf.at[9, 1], np.array([0, 0, 0]))
self.assert_eq(kdf.at[9, 1], pdf.at[9, 1])
def test_at_multiindex(self):
pdf = self.pdf.set_index("b", append=True)
kdf = self.kdf.set_index("b", append=True)
# TODO: seems like a pandas' bug in pandas>=1.1.0
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
self.assert_eq(kdf.at[(3, 6), "a"], pdf.at[(3, 6), "a"])
self.assert_eq(kdf.at[(3,), "a"], pdf.at[(3,), "a"])
self.assert_eq(list(kdf.at[(9, 0), "a"]), list(pdf.at[(9, 0), "a"]))
self.assert_eq(list(kdf.at[(9,), "a"]), list(pdf.at[(9,), "a"]))
else:
self.assert_eq(kdf.at[(3, 6), "a"], 3)
self.assert_eq(kdf.at[(3,), "a"], np.array([3]))
self.assert_eq(list(kdf.at[(9, 0), "a"]), [7, 8, 9])
self.assert_eq(list(kdf.at[(9,), "a"]), [7, 8, 9])
with self.assertRaises(ValueError):
kdf.at[3, "a"]
def test_at_multiindex_columns(self):
arrays = [np.array(["bar", "bar", "baz", "baz"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.at["B", ("bar", "one")], pdf.at["B", ("bar", "one")])
with self.assertRaises(KeyError):
kdf.at["B", "bar"]
# non-string column names
arrays = [np.array([0, 0, 1, 1]), np.array([1, 2, 1, 2])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.at["B", (0, 1)], pdf.at["B", (0, 1)])
def test_iat(self):
pdf = self.pdf
kdf = self.kdf
# Create the equivalent of pdf.loc[3] as a Koalas Series
# This is necessary because .loc[n] does not currently work with Koalas DataFrames (#383)
test_series = ks.Series([3, 6], index=["a", "b"], name="3")
# Assert invalided signatures raise TypeError
with self.assertRaises(
TypeError,
msg="Use DataFrame.at like .iat[row_interget_position, column_integer_position]",
):
kdf.iat[3]
with self.assertRaises(
ValueError, msg="iAt based indexing on multi-index can only have tuple values"
):
kdf.iat[3, "b"] # 'ab' is of length 2 but str type instead of tuple
with self.assertRaises(TypeError, msg="Use Series.iat like .iat[row_integer_position]"):
test_series.iat[3, "b"]
# Assert .iat for DataFrames
self.assertEqual(kdf.iat[7, 0], 8)
self.assertEqual(kdf.iat[7, 0], pdf.iat[7, 0])
# Assert .iat for Series
self.assertEqual(test_series.iat[1], 6)
self.assertEqual(test_series.iat[1], pdf.loc[3].iat[1])
# Assert invalid column or integer position result in a KeyError like with pandas
with self.assertRaises(KeyError, msg=99):
kdf.iat[0, 99]
with self.assertRaises(KeyError, msg=99):
kdf.iat[99, 0]
with self.assertRaises(ValueError):
kdf.iat[(1, 1), 1]
with self.assertRaises(ValueError):
kdf.iat[1, (1, 1)]
# Assert setting values fails
with self.assertRaises(TypeError):
kdf.iat[4, 1] = 10
def test_iat_multiindex(self):
pdf = self.pdf.set_index("b", append=True)
kdf = self.kdf.set_index("b", append=True)
self.assert_eq(kdf.iat[7, 0], pdf.iat[7, 0])
with self.assertRaises(ValueError):
kdf.iat[3, "a"]
def test_iat_multiindex_columns(self):
arrays = [np.array(["bar", "bar", "baz", "baz"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.iat[1, 3], pdf.iat[1, 3])
with self.assertRaises(KeyError):
kdf.iat[0, 99]
with self.assertRaises(KeyError):
kdf.iat[99, 0]
def test_loc(self):
kdf = self.kdf
pdf = self.pdf
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
self.assert_eq(kdf.loc[3:8], pdf.loc[3:8])
self.assert_eq(kdf.loc[:8], pdf.loc[:8])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[[5]], pdf.loc[[5]])
self.assert_eq(kdf.loc[:], pdf.loc[:])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 1, 8]], pdf.loc[[3, 4, 1, 8]])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 1, 9]], pdf.loc[[3, 4, 1, 9]])
# TODO?: self.assert_eq(kdf.loc[np.array([3, 4, 1, 9])], pdf.loc[np.array([3, 4, 1, 9])])
self.assert_eq(kdf.a.loc[5:5], pdf.a.loc[5:5])
self.assert_eq(kdf.a.loc[3:8], pdf.a.loc[3:8])
self.assert_eq(kdf.a.loc[:8], pdf.a.loc[:8])
self.assert_eq(kdf.a.loc[3:], pdf.a.loc[3:])
self.assert_eq(kdf.a.loc[[5]], pdf.a.loc[[5]])
# TODO?: self.assert_eq(kdf.a.loc[[3, 4, 1, 8]], pdf.a.loc[[3, 4, 1, 8]])
# TODO?: self.assert_eq(kdf.a.loc[[3, 4, 1, 9]], pdf.a.loc[[3, 4, 1, 9]])
# TODO?: self.assert_eq(kdf.a.loc[np.array([3, 4, 1, 9])],
# pdf.a.loc[np.array([3, 4, 1, 9])])
self.assert_eq(kdf.a.loc[[]], pdf.a.loc[[]])
self.assert_eq(kdf.a.loc[np.array([])], pdf.a.loc[np.array([])])
self.assert_eq(kdf.loc[1000:], pdf.loc[1000:])
self.assert_eq(kdf.loc[-2000:-1000], pdf.loc[-2000:-1000])
self.assert_eq(kdf.loc[5], pdf.loc[5])
self.assert_eq(kdf.loc[9], pdf.loc[9])
self.assert_eq(kdf.a.loc[5], pdf.a.loc[5])
self.assert_eq(kdf.a.loc[9], pdf.a.loc[9])
self.assertRaises(KeyError, lambda: kdf.loc[10])
self.assertRaises(KeyError, lambda: kdf.a.loc[10])
# monotonically increasing index test
pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]}, index=[0, 1, 1, 2, 2, 2, 4, 5, 6])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[:2], pdf.loc[:2])
self.assert_eq(kdf.loc[:3], pdf.loc[:3])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[4:], pdf.loc[4:])
self.assert_eq(kdf.loc[3:2], pdf.loc[3:2])
self.assert_eq(kdf.loc[-1:2], pdf.loc[-1:2])
self.assert_eq(kdf.loc[3:10], pdf.loc[3:10])
# monotonically decreasing index test
pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]}, index=[6, 5, 5, 4, 4, 4, 2, 1, 0])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[:4], pdf.loc[:4])
self.assert_eq(kdf.loc[:3], pdf.loc[:3])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[2:], pdf.loc[2:])
self.assert_eq(kdf.loc[2:3], pdf.loc[2:3])
self.assert_eq(kdf.loc[2:-1], pdf.loc[2:-1])
self.assert_eq(kdf.loc[10:3], pdf.loc[10:3])
# test when type of key is string and given value is not included in key
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=["a", "b", "d"])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc["a":"z"], pdf.loc["a":"z"])
# KeyError when index is not monotonic increasing or decreasing
# and specified values don't exist in index
kdf = ks.DataFrame([[1, 2], [4, 5], [7, 8]], index=["cobra", "viper", "sidewinder"])
self.assertRaises(KeyError, lambda: kdf.loc["cobra":"koalas"])
self.assertRaises(KeyError, lambda: kdf.loc["koalas":"viper"])
kdf = ks.DataFrame([[1, 2], [4, 5], [7, 8]], index=[10, 30, 20])
self.assertRaises(KeyError, lambda: kdf.loc[0:30])
self.assertRaises(KeyError, lambda: kdf.loc[10:100])
def test_loc_non_informative_index(self):
pdf = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 30, 40])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[20:30], pdf.loc[20:30])
pdf = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 20, 40])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[20:20], pdf.loc[20:20])
def test_loc_with_series(self):
kdf = self.kdf
pdf = self.pdf
self.assert_eq(kdf.loc[kdf.a % 2 == 0], pdf.loc[pdf.a % 2 == 0])
self.assert_eq(kdf.loc[kdf.a % 2 == 0, "a"], pdf.loc[pdf.a % 2 == 0, "a"])
self.assert_eq(kdf.loc[kdf.a % 2 == 0, ["a"]], pdf.loc[pdf.a % 2 == 0, ["a"]])
self.assert_eq(kdf.a.loc[kdf.a % 2 == 0], pdf.a.loc[pdf.a % 2 == 0])
self.assert_eq(kdf.loc[kdf.copy().a % 2 == 0], pdf.loc[pdf.copy().a % 2 == 0])
self.assert_eq(kdf.loc[kdf.copy().a % 2 == 0, "a"], pdf.loc[pdf.copy().a % 2 == 0, "a"])
self.assert_eq(kdf.loc[kdf.copy().a % 2 == 0, ["a"]], pdf.loc[pdf.copy().a % 2 == 0, ["a"]])
self.assert_eq(kdf.a.loc[kdf.copy().a % 2 == 0], pdf.a.loc[pdf.copy().a % 2 == 0])
def test_loc_noindex(self):
kdf = self.kdf
kdf = kdf.reset_index()
pdf = self.pdf
pdf = pdf.reset_index()
self.assert_eq(kdf[["a"]], pdf[["a"]])
self.assert_eq(kdf.loc[:], pdf.loc[:])
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
def test_loc_multiindex(self):
kdf = self.kdf
kdf = kdf.set_index("b", append=True)
pdf = self.pdf
pdf = pdf.set_index("b", append=True)
self.assert_eq(kdf.loc[:], pdf.loc[:])
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
self.assert_eq(kdf.loc[5:9], pdf.loc[5:9])
self.assert_eq(kdf.loc[5], pdf.loc[5])
self.assert_eq(kdf.loc[9], pdf.loc[9])
# TODO: self.assert_eq(kdf.loc[(5, 3)], pdf.loc[(5, 3)])
# TODO: self.assert_eq(kdf.loc[(9, 0)], pdf.loc[(9, 0)])
self.assert_eq(kdf.a.loc[5], pdf.a.loc[5])
self.assert_eq(kdf.a.loc[9], pdf.a.loc[9])
self.assertTrue((kdf.a.loc[(5, 3)] == pdf.a.loc[(5, 3)]).all())
self.assert_eq(kdf.a.loc[(9, 0)], pdf.a.loc[(9, 0)])
# monotonically increasing index test
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5]},
index=pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c"), ("y", "d"), ("z", "e")]
),
)
kdf = ks.from_pandas(pdf)
for rows_sel in [
slice(None),
slice("y", None),
slice(None, "y"),
slice(("x", "b"), None),
slice(None, ("y", "c")),
slice(("x", "b"), ("y", "c")),
slice("x", ("y", "c")),
slice(("x", "b"), "y"),
]:
with self.subTest("monotonically increasing", rows_sel=rows_sel):
self.assert_eq(kdf.loc[rows_sel], pdf.loc[rows_sel])
self.assert_eq(kdf.a.loc[rows_sel], pdf.a.loc[rows_sel])
# monotonically increasing first index test
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5]},
index=pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c"), ("y", "a"), ("z", "e")]
),
)
kdf = ks.from_pandas(pdf)
for rows_sel in [
slice(None),
slice("y", None),
slice(None, "y"),
]:
with self.subTest("monotonically increasing first index", rows_sel=rows_sel):
self.assert_eq(kdf.loc[rows_sel], pdf.loc[rows_sel])
self.assert_eq(kdf.a.loc[rows_sel], pdf.a.loc[rows_sel])
for rows_sel in [
slice(("x", "b"), None),
slice(None, ("y", "c")),
slice(("x", "b"), ("y", "c")),
slice("x", ("y", "c")),
slice(("x", "b"), "y"),
]:
with self.subTest("monotonically increasing first index", rows_sel=rows_sel):
self.assertRaises(KeyError, lambda: kdf.loc[rows_sel])
self.assertRaises(KeyError, lambda: kdf.a.loc[rows_sel])
# not monotonically increasing index test
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5]},
index=pd.MultiIndex.from_tuples(
[("z", "e"), ("y", "d"), ("y", "c"), ("x", "b"), ("x", "a")]
),
)
kdf = ks.from_pandas(pdf)
for rows_sel in [
slice("y", None),
slice(None, "y"),
slice(("x", "b"), None),
slice(None, ("y", "c")),
slice(("x", "b"), ("y", "c")),
slice("x", ("y", "c")),
slice(("x", "b"), "y"),
]:
with self.subTest("monotonically decreasing", rows_sel=rows_sel):
self.assertRaises(KeyError, lambda: kdf.loc[rows_sel])
self.assertRaises(KeyError, lambda: kdf.a.loc[rows_sel])
def test_loc2d_multiindex(self):
kdf = self.kdf
kdf = kdf.set_index("b", append=True)
pdf = self.pdf
pdf = pdf.set_index("b", append=True)
self.assert_eq(kdf.loc[:, :], pdf.loc[:, :])
self.assert_eq(kdf.loc[:, "a"], pdf.loc[:, "a"])
self.assert_eq(kdf.loc[5:5, "a"], pdf.loc[5:5, "a"])
self.assert_eq(kdf.loc[:, "a":"a"], pdf.loc[:, "a":"a"])
self.assert_eq(kdf.loc[:, "a":"c"], pdf.loc[:, "a":"c"])
self.assert_eq(kdf.loc[:, "b":"c"], pdf.loc[:, "b":"c"])
def test_loc2d(self):
kdf = self.kdf
pdf = self.pdf
# index indexer is always regarded as slice for duplicated values
self.assert_eq(kdf.loc[5:5, "a"], pdf.loc[5:5, "a"])
self.assert_eq(kdf.loc[[5], "a"], pdf.loc[[5], "a"])
self.assert_eq(kdf.loc[5:5, ["a"]], pdf.loc[5:5, ["a"]])
self.assert_eq(kdf.loc[[5], ["a"]], pdf.loc[[5], ["a"]])
self.assert_eq(kdf.loc[:, :], pdf.loc[:, :])
self.assert_eq(kdf.loc[3:8, "a"], pdf.loc[3:8, "a"])
self.assert_eq(kdf.loc[:8, "a"], pdf.loc[:8, "a"])
self.assert_eq(kdf.loc[3:, "a"], pdf.loc[3:, "a"])
self.assert_eq(kdf.loc[[8], "a"], pdf.loc[[8], "a"])
self.assert_eq(kdf.loc[3:8, ["a"]], pdf.loc[3:8, ["a"]])
self.assert_eq(kdf.loc[:8, ["a"]], pdf.loc[:8, ["a"]])
self.assert_eq(kdf.loc[3:, ["a"]], pdf.loc[3:, ["a"]])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 3], ['a']], pdf.loc[[3, 4, 3], ['a']])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.loc[3, 3, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[3, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[3:, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[kdf.a % 2 == 0, 3])
self.assert_eq(kdf.loc[5, "a"], pdf.loc[5, "a"])
self.assert_eq(kdf.loc[9, "a"], pdf.loc[9, "a"])
self.assert_eq(kdf.loc[5, ["a"]], pdf.loc[5, ["a"]])
self.assert_eq(kdf.loc[9, ["a"]], pdf.loc[9, ["a"]])
self.assert_eq(kdf.loc[:, "a":"a"], pdf.loc[:, "a":"a"])
self.assert_eq(kdf.loc[:, "a":"d"], pdf.loc[:, "a":"d"])
self.assert_eq(kdf.loc[:, "c":"d"], pdf.loc[:, "c":"d"])
# bool list-like column select
bool_list = [True, False]
self.assert_eq(kdf.loc[:, bool_list], pdf.loc[:, bool_list])
self.assert_eq(kdf.loc[:, np.array(bool_list)], pdf.loc[:, np.array(bool_list)])
pser = pd.Series(bool_list, index=pdf.columns)
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
pser = pd.Series(list(reversed(bool_list)), index=list(reversed(pdf.columns)))
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
self.assertRaises(IndexError, lambda: kdf.loc[:, bool_list[:-1]])
self.assertRaises(IndexError, lambda: kdf.loc[:, np.array(bool_list + [True])])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.loc[:, pd.Series(bool_list)])
# non-string column names
kdf = self.kdf2
pdf = self.pdf2
self.assert_eq(kdf.loc[5:5, 0], pdf.loc[5:5, 0])
self.assert_eq(kdf.loc[5:5, [0]], pdf.loc[5:5, [0]])
self.assert_eq(kdf.loc[3:8, 0], pdf.loc[3:8, 0])
self.assert_eq(kdf.loc[3:8, [0]], pdf.loc[3:8, [0]])
self.assert_eq(kdf.loc[:, 0:0], pdf.loc[:, 0:0])
self.assert_eq(kdf.loc[:, 0:3], pdf.loc[:, 0:3])
self.assert_eq(kdf.loc[:, 2:3], pdf.loc[:, 2:3])
def test_loc2d_multiindex_columns(self):
arrays = [np.array(["bar", "bar", "baz", "baz"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc["B":"B", "bar"], pdf.loc["B":"B", "bar"])
self.assert_eq(kdf.loc["B":"B", ["bar"]], pdf.loc["B":"B", ["bar"]])
self.assert_eq(kdf.loc[:, "bar":"bar"], pdf.loc[:, "bar":"bar"])
self.assert_eq(kdf.loc[:, "bar":("baz", "one")], pdf.loc[:, "bar":("baz", "one")])
self.assert_eq(
kdf.loc[:, ("bar", "two"):("baz", "one")], pdf.loc[:, ("bar", "two"):("baz", "one")]
)
self.assert_eq(kdf.loc[:, ("bar", "two"):"bar"], pdf.loc[:, ("bar", "two"):"bar"])
self.assert_eq(kdf.loc[:, "a":"bax"], pdf.loc[:, "a":"bax"])
self.assert_eq(
kdf.loc[:, ("bar", "x"):("baz", "a")],
pdf.loc[:, ("bar", "x"):("baz", "a")],
almost=True,
)
pdf = pd.DataFrame(
np.random.randn(3, 4),
index=["A", "B", "C"],
columns=pd.MultiIndex.from_tuples(
[("bar", "two"), ("bar", "one"), ("baz", "one"), ("baz", "two")]
),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[:, "bar":"baz"], pdf.loc[:, "bar":"baz"])
self.assertRaises(KeyError, lambda: kdf.loc[:, "bar":("baz", "one")])
self.assertRaises(KeyError, lambda: kdf.loc[:, ("bar", "two"):"bar"])
# bool list-like column select
bool_list = [True, False, True, False]
self.assert_eq(kdf.loc[:, bool_list], pdf.loc[:, bool_list])
self.assert_eq(kdf.loc[:, np.array(bool_list)], pdf.loc[:, np.array(bool_list)])
pser = pd.Series(bool_list, index=pdf.columns)
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
pser = pd.Series(list(reversed(bool_list)), index=list(reversed(pdf.columns)))
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
# non-string column names
arrays = [np.array([0, 0, 1, 1]), np.array([1, 2, 1, 2])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc["B":"B", 0], pdf.loc["B":"B", 0])
self.assert_eq(kdf.loc["B":"B", [0]], pdf.loc["B":"B", [0]])
self.assert_eq(kdf.loc[:, 0:0], pdf.loc[:, 0:0])
self.assert_eq(kdf.loc[:, 0:(1, 1)], pdf.loc[:, 0:(1, 1)])
self.assert_eq(kdf.loc[:, (0, 2):(1, 1)], pdf.loc[:, (0, 2):(1, 1)])
self.assert_eq(kdf.loc[:, (0, 2):0], pdf.loc[:, (0, 2):0])
self.assert_eq(kdf.loc[:, -1:2], pdf.loc[:, -1:2])
def test_loc2d_with_known_divisions(self):
pdf = pd.DataFrame(
np.random.randn(20, 5), index=list("abcdefghijklmnopqrst"), columns=list("ABCDE")
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[["a"], "A"], pdf.loc[["a"], "A"])
self.assert_eq(kdf.loc[["a"], ["A"]], pdf.loc[["a"], ["A"]])
self.assert_eq(kdf.loc["a":"o", "A"], pdf.loc["a":"o", "A"])
self.assert_eq(kdf.loc["a":"o", ["A"]], pdf.loc["a":"o", ["A"]])
self.assert_eq(kdf.loc[["n"], ["A"]], pdf.loc[["n"], ["A"]])
self.assert_eq(kdf.loc[["a", "c", "n"], ["A"]], pdf.loc[["a", "c", "n"], ["A"]])
# TODO?: self.assert_eq(kdf.loc[['t', 'b'], ['A']], pdf.loc[['t', 'b'], ['A']])
# TODO?: self.assert_eq(kdf.loc[['r', 'r', 'c', 'g', 'h'], ['A']],
# TODO?: pdf.loc[['r', 'r', 'c', 'g', 'h'], ['A']])
@unittest.skip("TODO: should handle duplicated columns properly")
def test_loc2d_duplicated_columns(self):
pdf = pd.DataFrame(
np.random.randn(20, 5), index=list("abcdefghijklmnopqrst"), columns=list("AABCD")
)
kdf = ks.from_pandas(pdf)
# TODO?: self.assert_eq(kdf.loc[['a'], 'A'], pdf.loc[['a'], 'A'])
# TODO?: self.assert_eq(kdf.loc[['a'], ['A']], pdf.loc[['a'], ['A']])
self.assert_eq(kdf.loc[["j"], "B"], pdf.loc[["j"], "B"])
self.assert_eq(kdf.loc[["j"], ["B"]], pdf.loc[["j"], ["B"]])
# TODO?: self.assert_eq(kdf.loc['a':'o', 'A'], pdf.loc['a':'o', 'A'])
# TODO?: self.assert_eq(kdf.loc['a':'o', ['A']], pdf.loc['a':'o', ['A']])
self.assert_eq(kdf.loc["j":"q", "B"], pdf.loc["j":"q", "B"])
self.assert_eq(kdf.loc["j":"q", ["B"]], pdf.loc["j":"q", ["B"]])
# TODO?: self.assert_eq(kdf.loc['a':'o', 'B':'D'], pdf.loc['a':'o', 'B':'D'])
# TODO?: self.assert_eq(kdf.loc['a':'o', 'B':'D'], pdf.loc['a':'o', 'B':'D'])
# TODO?: self.assert_eq(kdf.loc['j':'q', 'B':'A'], pdf.loc['j':'q', 'B':'A'])
# TODO?: self.assert_eq(kdf.loc['j':'q', 'B':'A'], pdf.loc['j':'q', 'B':'A'])
self.assert_eq(kdf.loc[kdf.B > 0, "B"], pdf.loc[pdf.B > 0, "B"])
# TODO?: self.assert_eq(kdf.loc[kdf.B > 0, ['A', 'C']], pdf.loc[pdf.B > 0, ['A', 'C']])
def test_getitem(self):
pdf = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"B": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"C": [True, False, True] * 3,
},
columns=list("ABC"),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf["A"], pdf["A"])
self.assert_eq(kdf[["A", "B"]], pdf[["A", "B"]])
self.assert_eq(kdf[kdf.C], pdf[pdf.C])
self.assertRaises(KeyError, lambda: kdf["X"])
self.assertRaises(KeyError, lambda: kdf[["A", "X"]])
self.assertRaises(AttributeError, lambda: kdf.X)
# not str/unicode
# TODO?: pdf = pd.DataFrame(np.random.randn(10, 5))
# TODO?: kdf = ks.from_pandas(pdf)
# TODO?: self.assert_eq(kdf[0], pdf[0])
# TODO?: self.assert_eq(kdf[[1, 2]], pdf[[1, 2]])
# TODO?: self.assertRaises(KeyError, lambda: pdf[8])
# TODO?: self.assertRaises(KeyError, lambda: pdf[[1, 8]])
# non-string column names
pdf = pd.DataFrame(
{
10: [1, 2, 3, 4, 5, 6, 7, 8, 9],
20: [9, 8, 7, 6, 5, 4, 3, 2, 1],
30: [True, False, True] * 3,
}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf[10], pdf[10])
self.assert_eq(kdf[[10, 20]], pdf[[10, 20]])
def test_getitem_slice(self):
pdf = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"B": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"C": [True, False, True] * 3,
},
index=list("abcdefghi"),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf["a":"e"], pdf["a":"e"])
self.assert_eq(kdf["a":"b"], pdf["a":"b"])
self.assert_eq(kdf["f":], pdf["f":])
def test_loc_on_numpy_datetimes(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3]}, index=list(map(np.datetime64, ["2014", "2015", "2016"]))
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc["2014":"2015"], pdf.loc["2014":"2015"])
def test_loc_on_pandas_datetimes(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3]}, index=list(map(pd.Timestamp, ["2014", "2015", "2016"]))
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc["2014":"2015"], pdf.loc["2014":"2015"])
@unittest.skip("TODO?: the behavior of slice for datetime")
def test_loc_datetime_no_freq(self):
datetime_index = pd.date_range("2016-01-01", "2016-01-31", freq="12h")
datetime_index.freq = None # FORGET FREQUENCY
pdf = pd.DataFrame({"num": range(len(datetime_index))}, index=datetime_index)
kdf = ks.from_pandas(pdf)
slice_ = slice("2016-01-03", "2016-01-05")
result = kdf.loc[slice_, :]
expected = pdf.loc[slice_, :]
self.assert_eq(result, expected)
@unittest.skip("TODO?: the behavior of slice for datetime")
def test_loc_timestamp_str(self):
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
kdf = ks.from_pandas(pdf)
# partial string slice
# TODO?: self.assert_eq(pdf.loc['2011-01-02'],
# TODO?: kdf.loc['2011-01-02'])
self.assert_eq(pdf.loc["2011-01-02":"2011-01-05"], kdf.loc["2011-01-02":"2011-01-05"])
# series
# TODO?: self.assert_eq(pdf.A.loc['2011-01-02'],
# TODO?: kdf.A.loc['2011-01-02'])
self.assert_eq(pdf.A.loc["2011-01-02":"2011-01-05"], kdf.A.loc["2011-01-02":"2011-01-05"])
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="M", periods=100),
)
kdf = ks.from_pandas(pdf)
# TODO?: self.assert_eq(pdf.loc['2011-01'], kdf.loc['2011-01'])
# TODO?: self.assert_eq(pdf.loc['2011'], kdf.loc['2011'])
self.assert_eq(pdf.loc["2011-01":"2012-05"], kdf.loc["2011-01":"2012-05"])
self.assert_eq(pdf.loc["2011":"2015"], kdf.loc["2011":"2015"])
# series
# TODO?: self.assert_eq(pdf.B.loc['2011-01'], kdf.B.loc['2011-01'])
# TODO?: self.assert_eq(pdf.B.loc['2011'], kdf.B.loc['2011'])
self.assert_eq(pdf.B.loc["2011-01":"2012-05"], kdf.B.loc["2011-01":"2012-05"])
self.assert_eq(pdf.B.loc["2011":"2015"], kdf.B.loc["2011":"2015"])
@unittest.skip("TODO?: the behavior of slice for datetime")
def test_getitem_timestamp_str(self):
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
kdf = ks.from_pandas(pdf)
# partial string slice
# TODO?: self.assert_eq(pdf['2011-01-02'],
# TODO?: kdf['2011-01-02'])
self.assert_eq(pdf["2011-01-02":"2011-01-05"], kdf["2011-01-02":"2011-01-05"])
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="M", periods=100),
)
kdf = ks.from_pandas(pdf)
# TODO?: self.assert_eq(pdf['2011-01'], kdf['2011-01'])
# TODO?: self.assert_eq(pdf['2011'], kdf['2011'])
self.assert_eq(pdf["2011-01":"2012-05"], kdf["2011-01":"2012-05"])
self.assert_eq(pdf["2011":"2015"], kdf["2011":"2015"])
@unittest.skip("TODO?: period index can't convert to DataFrame correctly")
def test_getitem_period_str(self):
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.period_range("2011-01-01", freq="H", periods=100),
)
kdf = ks.from_pandas(pdf)
# partial string slice
# TODO?: self.assert_eq(pdf['2011-01-02'],
# TODO?: kdf['2011-01-02'])
self.assert_eq(pdf["2011-01-02":"2011-01-05"], kdf["2011-01-02":"2011-01-05"])
pdf = pd.DataFrame(
{"A": | np.random.randn(100) | numpy.random.randn |
"""Logger application"""
from __future__ import annotations
import csv
import json
import queue
import sys
import threading
import time
import traceback
import warnings
from datetime import datetime
from typing import Any, Optional
from serial.threaded import ReaderThread #type:ignore
from flask import render_template, request, Flask, Response
from waitress import serve #type:ignore
import numpy as np
from config import loadnames
import lib
SAMPLE_DATA_FILENAME = 'data_sample.csv'
RAW_DATA_FILENAME = 'data_raw.csv'
HOURLY_DATA_FILENAME = 'data_hourly.csv'
# there's no default here, the default is in the arduino code
current_config = lib.Conf()
app = Flask(__name__)
rng = np.random.default_rng() #type:ignore
raw_queue: queue.SimpleQueue[bytes] = queue.SimpleQueue()
randx = rng.integers(1023, size=100)
randy = rng.integers(1023, size=100)
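# Pre-populate the latest-readings table with random placeholder samples,
# presumably so the web UI has something to plot before real data arrives.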
latest_va = {'load1': lib.VA('load1', 5000, 1000, randx, randy),
'load2': lib.VA('load2', 5000, 1000, randx, randy),
'load3': lib.VA('load3', 5000, 1000, randx, randy),
'load4': lib.VA('load4', 5000, 1000, randx, randy),
'load5': lib.VA('load5', 5000, 1000, randx, randy),
'load6': lib.VA('load6', 5000, 1000, randx, randy),
'load7': lib.VA('load7', 5000, 1000, randx, randy),
'load8': lib.VA('load8', 5000, 1000, randx, randy),}
def va_updater(volts_amps: lib.VA) -> None:
"""Callback for updating VA"""
latest_va[volts_amps.load] = volts_amps
TRIM_FREQ = 200 # trim every N rows
TRIM_SIZE = 10000 # size of raw file to retain
def make_sample_line(now_s: str, samples: lib.VA) -> str:
"""sample data is for debugging"""
    sample_v_mean: float = np.mean(samples.volts)
from collections import OrderedDict
import numpy as np
import collections
from copy import deepcopy
import random
import robosuite.utils.transform_utils as T
from robosuite.utils.mjcf_utils import CustomMaterial, array_to_string, find_elements, new_site
from robosuite.utils.mjcf_utils import CustomMaterial
from robosuite.environments.manipulation.single_arm_env import SingleArmEnv
from robosuite.models.arenas import TableArena
from robosuite.models.objects import BoxObject, CylinderObject, PlateWithHoleObject
from robosuite.models.tasks import ManipulationTask
from robosuite.utils.placement_samplers import UniformRandomSampler
from robosuite.utils.observables import Observable, sensor
class Lift(SingleArmEnv):
"""
This class corresponds to the lifting task for a single robot arm.
Args:
robots (str or list of str): Specification for specific robot arm(s) to be instantiated within this env
(e.g: "Sawyer" would generate one arm; ["Panda", "Panda", "Sawyer"] would generate three robot arms)
Note: Must be a single single-arm robot!
env_configuration (str): Specifies how to position the robots within the environment (default is "default").
For most single arm environments, this argument has no impact on the robot setup.
controller_configs (str or list of dict): If set, contains relevant controller parameters for creating a
custom controller. Else, uses the default controller for this specific task. Should either be single
dict if same controller is to be used for all robots or else it should be a list of the same length as
"robots" param
gripper_types (str or list of str): type of gripper, used to instantiate
            gripper models from gripper factory. Default is "default", which is the default gripper(s) associated
            with the robot(s) in the 'robots' specification. None removes the gripper, and any other (valid) model
overrides the default gripper. Should either be single str if same gripper type is to be used for all
robots or else it should be a list of the same length as "robots" param
initialization_noise (dict or list of dict): Dict containing the initialization noise parameters.
The expected keys and corresponding value types are specified below:
:`'magnitude'`: The scale factor of uni-variate random noise applied to each of a robot's given initial
joint positions. Setting this value to `None` or 0.0 results in no noise being applied.
If "gaussian" type of noise is applied then this magnitude scales the standard deviation applied,
If "uniform" type of noise is applied then this magnitude sets the bounds of the sampling range
:`'type'`: Type of noise to apply. Can either specify "gaussian" or "uniform"
Should either be single dict if same noise value is to be used for all robots or else it should be a
list of the same length as "robots" param
:Note: Specifying "default" will automatically use the default noise settings.
Specifying None will automatically create the required dict with "magnitude" set to 0.0.
table_full_size (3-tuple): x, y, and z dimensions of the table.
table_friction (3-tuple): the three mujoco friction parameters for
the table.
use_camera_obs (bool): if True, every observation includes rendered image(s)
use_object_obs (bool): if True, include object (cube) information in
the observation.
reward_scale (None or float): Scales the normalized reward function by the amount specified.
If None, environment reward remains unnormalized
reward_shaping (bool): if True, use dense rewards.
placement_initializer (ObjectPositionSampler): if provided, will
be used to place objects on every reset, else a UniformRandomSampler
is used by default.
has_renderer (bool): If true, render the simulation state in
a viewer instead of headless mode.
has_offscreen_renderer (bool): True if using off-screen rendering
render_camera (str): Name of camera to render if `has_renderer` is True. Setting this value to 'None'
will result in the default angle being applied, which is useful as it can be dragged / panned by
the user using the mouse
render_collision_mesh (bool): True if rendering collision meshes in camera. False otherwise.
render_visual_mesh (bool): True if rendering visual meshes in camera. False otherwise.
render_gpu_device_id (int): corresponds to the GPU device id to use for offscreen rendering.
Defaults to -1, in which case the device will be inferred from environment variables
(GPUS or CUDA_VISIBLE_DEVICES).
control_freq (float): how many control signals to receive in every second. This sets the amount of
simulation time that passes between every action input.
horizon (int): Every episode lasts for exactly @horizon timesteps.
ignore_done (bool): True if never terminating the environment (ignore @horizon).
hard_reset (bool): If True, re-loads model, sim, and render object upon a reset call, else,
only calls sim.reset and resets all robosuite-internal variables
camera_names (str or list of str): name of camera to be rendered. Should either be single str if
same name is to be used for all cameras' rendering or else it should be a list of cameras to render.
:Note: At least one camera must be specified if @use_camera_obs is True.
:Note: To render all robots' cameras of a certain type (e.g.: "robotview" or "eye_in_hand"), use the
convention "all-{name}" (e.g.: "all-robotview") to automatically render all camera images from each
robot's camera list).
camera_heights (int or list of int): height of camera frame. Should either be single int if
same height is to be used for all cameras' frames or else it should be a list of the same length as
"camera names" param.
camera_widths (int or list of int): width of camera frame. Should either be single int if
same width is to be used for all cameras' frames or else it should be a list of the same length as
"camera names" param.
camera_depths (bool or list of bool): True if rendering RGB-D, and RGB otherwise. Should either be single
bool if same depth setting is to be used for all cameras or else it should be a list of the same length as
"camera names" param.
Raises:
AssertionError: [Invalid number of robots specified]
"""
def __init__(
self,
robots,
env_configuration="default",
controller_configs=None,
gripper_types="default",
initialization_noise="default",
table_full_size=(0.8, 0.8, 0.05),
table_friction=(1., 5e-3, 1e-4),
use_camera_obs=True,
use_object_obs=True,
reward_scale=1.0,
reward_shaping=False,
placement_initializer=None,
has_renderer=False,
has_offscreen_renderer=True,
render_camera="frontview",
render_collision_mesh=False,
render_visual_mesh=True,
render_gpu_device_id=-1,
control_freq=20,
horizon=1000,
ignore_done=False,
hard_reset=True,
camera_names="agentview",
camera_heights=256,
camera_widths=256,
camera_depths=False,
num_via_point=0,
dist_error=0.002,
angle_error=0,
tanh_value=2.0,
r_reach_value=0.94,
error_type='circle',
control_spec=36,
peg_radius=(0.0025, 0.0025), # (0.00125, 0.00125)
peg_length=0.12,
):
#min jerk param:
self.num_via_point = num_via_point
# settings for table top
self.via_point = OrderedDict()
self.table_full_size = table_full_size
self.table_friction = table_friction
self.table_offset = np.array((0, 0, 0.8))
# Save peg specs
self.peg_radius = peg_radius
self.peg_length = peg_length
self.dist_error = dist_error
self.angle_error = angle_error
# reward configuration
self.reward_scale = reward_scale
self.reward_shaping = reward_shaping
# whether to use ground-truth object states
self.use_object_obs = use_object_obs
# object placement initializer
self.placement_initializer = placement_initializer
super().__init__(
robots=robots,
env_configuration=env_configuration,
controller_configs=controller_configs,
mount_types="default",
gripper_types=gripper_types,
initialization_noise=initialization_noise,
use_camera_obs=use_camera_obs,
has_renderer=has_renderer,
has_offscreen_renderer=has_offscreen_renderer,
render_camera=render_camera,
render_collision_mesh=render_collision_mesh,
render_visual_mesh=render_visual_mesh,
render_gpu_device_id=render_gpu_device_id,
control_freq=control_freq,
horizon=horizon,
ignore_done=ignore_done,
hard_reset=hard_reset,
camera_names=camera_names,
camera_heights=camera_heights,
camera_widths=camera_widths,
camera_depths=camera_depths,
dist_error=dist_error,
tanh_value=tanh_value,
r_reach_value=r_reach_value,
error_type=error_type,
control_spec=control_spec,
)
def reward(self, action=None):
"""
Reward function for the task.
Sparse un-normalized reward:
- a discrete reward of 100.0 is provided if the peg is inside the plate's hole
- Note that we enforce that it's inside at an appropriate angle (cos(theta) > 0.95).
Un-normalized summed components if using reward shaping:
- ????
Note that the final reward is normalized and scaled by reward_scale / 5.0 as
well so that the max score is equal to reward_scale
"""
# TODO - reward(self, action=None) - change this function
reward = 0
time_factor = (self.horizon - self.timestep) / self.horizon
# Right location and angle
if self._check_success() and self.num_via_point == 1:
# reward = self.horizon * time_factor
self.success += 1
if self.success == 2:
S = 1
return reward
# use a shaping reward
if self.reward_shaping:
# Grab relevant values
t, d, cos = self._compute_orientation()
# Reach a terminal state as quickly as possible
# reaching reward
reward += self.r_reach * 5 * cos # * time_factor
# Orientation reward
reward += self.hor_dist
# reward += 1 - np.tanh(2.0*d)
# reward += 1 - np.tanh(np.abs(t))
reward += cos
# if we're not reward shaping, we need to scale our sparse reward so that the max reward is identical
# to its dense version
else:
reward *= 5.0
if self.reward_scale is not None:
reward *= self.reward_scale
if (self.num_via_point == 1
and ((abs(self.hole_pos[0] - self.peg_pos[0]) > 0.014
or abs(self.hole_pos[1] - self.peg_pos[1]) > 0.014)
and self.peg_pos[2] < self.table_offset[2] + 0.1)
or self.horizon - self.timestep == 1
):
reward = 0 * -self.horizon / 3
# self.checked = (self.num_via_points-2)
# self.switch = 0
# self.switch_seq = 0
# self.success = 0
# # self.trans *= 3
# self.reset_via_point()
# self.built_min_jerk_traj()
return reward
def on_peg(self):
res = False
if (
abs(self.hole_pos[0] - self.peg_pos[0]) < 0.015
and abs(self.hole_pos[1] - self.peg_pos[1]) < 0.007
and abs(self.hole_pos[1] - self.peg_pos[1]) + abs(self.hole_pos[0] - self.peg_pos[0]) < 0.04
and self.peg_pos[2] < self.table_offset[2] + 0.05
):
res = True
return res
def _load_model(self):
"""
Loads an xml model, puts it in self.model
"""
super()._load_model()
# Adjust base pose accordingly
xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0])
self.robots[0].robot_model.set_base_xpos(xpos)
# load model for table top workspace
mujoco_arena = TableArena(
table_full_size=self.table_full_size,
table_friction=self.table_friction,
table_offset=self.table_offset,
)
# Arena always gets set to zero origin
mujoco_arena.set_origin([0, 0, 0])
self.peg_radius = 0.025
self.peg_height = 0.12
self.peg_z_offset = 0.9
self.rotation = None
x_range = [-0.0, 0.0]
y_range = [-0.1, -0.1]
# initialize objects of interest
self.peg = CylinderObject(name='peg',
size=[self.peg_radius, self.peg_height],
density=1,
duplicate_collision_geoms=True,
rgba=[1, 0, 0, 1], joints=None)
# load peg object (returns extracted object in XML form)
peg_obj = self.peg.get_obj()
# set pegs position relative to place where it is being placed
peg_obj.set("pos", array_to_string((0, 0, -0.04)))
peg_obj.append(new_site(name="peg_site", pos=(0, 0, self.peg_height), size=(0.005,)))
        # append the object to the gripper (attach body to body)
# main_eef = self.robots[0].robot_model.eef_name # 'robot0_right_hand'
main_eef = self.robots[0].gripper.bodies[1] # 'gripper0_eef' body
main_model = self.robots[0].robot_model # <robosuite.models.robots.manipulators.ur5e_robot.UR5e at 0x7fd9ead87ca0>
main_body = find_elements(root=main_model.worldbody, tags="body", attribs={"name": main_eef}, return_first=True)
main_body.append(peg_obj) # attach body to body
if self.rotation is None:
rot_angle = np.random.uniform(high=2 * np.pi, low=0)
        elif isinstance(self.rotation, collections.abc.Iterable):
rot_angle = np.random.uniform(
high=max(self.rotation), low=min(self.rotation)
)
else:
rot_angle = self.rotation
        hole_rot_set = np.array([np.cos(rot_angle / 2), 0, 0, np.sin(rot_angle / 2)])
hole_pos_set = np.array([np.random.uniform(high=x_range[0], low=x_range[1]), np.random.uniform(high=y_range[0], low=y_range[1]), 0.83])
hole_pos_str = ' '.join(map(str, hole_pos_set))
hole_rot_str = ' '.join(map(str, hole_rot_set))
self.hole = PlateWithHoleObject(name='hole')
hole_obj = self.hole.get_obj()
hole_obj.set("quat", hole_rot_str)
hole_obj.set("pos", hole_pos_str)
self.model = ManipulationTask(
mujoco_arena=mujoco_arena,
mujoco_robots=[robot.robot_model for robot in self.robots],
mujoco_objects=self.hole
)
# Make sure to add relevant assets from peg and hole objects
self.model.merge_assets(self.peg)
## Create placement initializer
# if self.placement_initializer is not None:
# self.placement_initializer.reset()
# self.placement_initializer.add_objects(self.peg)
# else:
# """Object samplers use the bottom_site and top_site sites of each object in order to place objects on top of other objects,
# and the horizontal_radius_site site in order to ensure that objects do not collide with one another. """
# task includes arena, robot, and objects of interest
# self.model = ManipulationTask(
# mujoco_arena=mujoco_arena,
# mujoco_robots=[robot.robot_model for robot in self.robots],
# mujoco_objects=[self.peg],
# )
# def _load_model(self):
# """
# Loads an xml model, puts it in self.model
# """
# super()._load_model()
#
# # Adjust base pose accordingly
# xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0])
# self.robots[0].robot_model.set_base_xpos(xpos)
#
# # load model for table top workspace
# mujoco_arena = TableArena(
# table_full_size=self.table_full_size,
# table_friction=self.table_friction,
# table_offset=self.table_offset,
# )
#
# # Arena always gets set to zero origin
# mujoco_arena.set_origin([0, 0, 0])
#
# # initialize objects of interest
# tex_attrib = {
# "type": "cube",
# }
# mat_attrib = {
# "texrepeat": "1 1",
# "specular": "0.4",
# "shininess": "0.1",
# }
# redwood = CustomMaterial(
# texture="WoodRed",
# tex_name="redwood",
# mat_name="redwood_mat",
# tex_attrib=tex_attrib,
# mat_attrib=mat_attrib,
# )
# # self.cube = BoxObject(
# # name="cube",
# # size_min=[0.020, 0.020, 0.020], # [0.015, 0.015, 0.015],
# # size_max=[0.022, 0.022, 0.022], # [0.018, 0.018, 0.018])
# # rgba=[1, 0, 0, 1],
# # material=redwood,
# # )
# self.cube = PlateWithHoleObject(name="cube")
# # Create placement initializer
# if self.placement_initializer is not None:
# self.placement_initializer.reset()
# self.placement_initializer.add_objects(self.cube)
# else:
# self.placement_initializer = UniformRandomSampler(
# name="cube",
# mujoco_objects=self.cube,
# x_range=[-0.03, 0.03],
# y_range=[-0.03, 0.03],
# rotation=None,
# ensure_object_boundary_in_range=True,
# ensure_valid_placement=True,
# reference_pos=self.table_offset,
# z_offset=0.01,
# )
#
# self.placement_initializer.reset()
#
#
    # # Add this nut to the placement initializer
# self.placement_initializer.add_objects(self.cube)
# # task includes arena, robot, and objects of interest
# # self.hole = PlateWithHoleObject(name='hole',)
# # self.hole = PlateWith5mmHoleObject(name='peg_hole')
# # hole_obj = self.hole.get_obj()
# # hole_obj.set("quat", "0 0 0.707 0.707")
# # hole_obj.set("pos", "0.1 0.2 1.17")
#
# self.model = ManipulationTask(
# mujoco_arena=mujoco_arena,
# mujoco_robots=[robot.robot_model for robot in self.robots],
# mujoco_objects=self.cube,
# )
def _setup_references(self):
"""
Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
        in a flattened array, which is how MuJoCo stores physical simulation data.
"""
super()._setup_references()
# Additional object references from this env
self.peg_body_id = self.sim.model.body_name2id(self.peg.root_body)
def _setup_observables(self):
"""
Sets up observables to be used for this environment. Creates object-based observables if enabled
Returns:
OrderedDict: Dictionary mapping observable names to its corresponding Observable object
"""
observables = super()._setup_observables()
# low-level object information
if self.use_object_obs:
# Get robot prefix and define observables modality
pf = self.robots[0].robot_model.naming_prefix
modality = "object"
# peg-related observables
@sensor(modality=modality)
def peg_pos(obs_cache):
return np.array(self.sim.data.body_xpos[self.peg_body_id])
@sensor(modality=modality)
def peg_quat(obs_cache):
return T.convert_quat(np.array(self.sim.data.body_xquat[self.peg_body_id]), to="xyzw")
@sensor(modality=modality)
def gripper_to_peg_pos(obs_cache):
return obs_cache[f"{pf}eef_pos"] - obs_cache["peg_pos"] if \
f"{pf}eef_pos" in obs_cache and "peg_pos" in obs_cache else np.zeros(3)
sensors = [peg_pos, peg_quat, gripper_to_peg_pos]
names = [s.__name__ for s in sensors]
# Create observables
for name, s in zip(names, sensors):
observables[name] = Observable(
name=name,
sensor=s,
sampling_rate=self.control_freq,
)
return observables
def _reset_internal(self):
"""
Resets simulation internal configurations.
"""
super()._reset_internal()
self.num_via_point = 0
self.success = 0
self.enter = 1
self.t_bias = 0
self.reset_via_point()
# Reset all object positions using initializer sampler if we're not directly loading from an xml
# if not self.deterministic_reset:
# Sample from the placement initializer for all objects
# object_placements = self.placement_initializer.sample()
#
# # Loop through all objects and reset their positions
# for obj_pos, obj_quat, obj in object_placements.values():
# self.sim.data.set_joint_qpos(obj.joints, np.concatenate([np.array(obj_pos), np.array(obj_quat)]))
def visualize(self, vis_settings):
"""
In addition to super call, visualize gripper site proportional to the distance to the peg.
Args:
vis_settings (dict): Visualization keywords mapped to T/F, determining whether that specific
component should be visualized. Should have "grippers" keyword as well as any other relevant
options specified.
"""
# Run superclass method first
super().visualize(vis_settings=vis_settings)
# Color the gripper visualization site according to its distance to the peg
if vis_settings["grippers"]:
self._visualize_gripper_to_target(gripper=self.robots[0].gripper, target=self.peg)
def _check_success(self):
"""
Check if peg is successfully aligned and placed within the hole
Returns:
bool: True if peg is placed in hole correctly
"""
# TODO - _check_success(self) - change this function
        # calculate the peg's end position.
self.r_reach = 0
self.hor_dist = 0
peg_mat = self.sim.data.body_xmat[self.peg_body_id]
peg_mat.shape = (3, 3)
peg_pos_center = self.sim.data.body_xpos[self.peg_body_id]
handquat = T.convert_quat(self.sim.data.get_body_xquat("robot0_right_hand"), to="xyzw")
handDCM = T.quat2mat(handquat)
self.peg_pos = self.sim.data.get_site_xpos(
"peg_site") # peg_pos_center + (handDCM @ [0, 0, 2*self.peg_length]).T
self.hole_pos = self.sim.data.get_site_xpos("hole_middle_cylinder")
hole_mat = self.sim.data.body_xmat[self.sim.model.body_name2id("hole_hole")]
hole_mat.shape = (3, 3)
        dist = np.linalg.norm(self.peg_pos - self.hole_pos)
import tclab
import time
import numpy as np
import sys
import first_principles_model as fp
def doublet_test(data_file='step_test.csv', show_plot=True):
'''doublet test the system and save data to given file path'''
import Adafruit_DHT # Only importable on the Pi itself
tc1 = tclab.TCLab()
tc1.LED(100)
# Bogus data row added to make concatenation work, never goes anywhere
data = [1, 1, 1, 1, 1, 1, 1, 1]
    csv_file_header = 'time,control output,box humidity,box temp,outside humidity,outside temp,heater 1 temp,heater 2 temp'
start_time = time.time()
u = 0
tc1.Q1(u)
tc1.Q2(u)
current_time = 0
while current_time < 1200:
try:
# read temp, humidity and time
humid_in, temp_in = Adafruit_DHT.read_retry(
11, 4, retries=5, delay_seconds=1)
humid_out, temp_out = Adafruit_DHT.read_retry(
11, 17, retries=5, delay_seconds=1)
current_time = time.time() - start_time
if humid_in is None:
# Rejects failed readings
continue
if humid_in > 100:
# Corrupted data, so ignore it
continue
if current_time > 60:
u = 100
if current_time > 800:
u = 50
tc1.Q1(u)
tc1.Q2(u)
# print current values
print('time: {:.1f}, u: {}, h_in: {}, t_in: {}, h1: {}, h2: {}, h_out: {}, t_out: {}'
.format(current_time, u, humid_in, temp_in, tc1.T1, tc1.T2, humid_out, temp_out))
data = np.vstack([data, [current_time, u, humid_in,
temp_in, humid_out, temp_out, tc1.T1, tc1.T2]])
np.savetxt(data_file, data[1:],
delimiter=',', header=csv_file_header)
except KeyboardInterrupt:
print('Exiting...')
tc1.LED(0)
return
except ValueError as error:
# Handles cases when the heater overheats
print(error)
def run_controller(run_time, PID_parameters, show_plot=True):
'''
Run the main loop
run_time total run time in minutes
show_plot whether to show the dynamic plot of the system
'''
Kc, tau_I, tau_D = PID_parameters
import Adafruit_DHT # Only importable on the Pi itself
tc1 = tclab.TCLab()
tc1.LED(100)
# Bogus data row added to make concatenation work, never goes anywhere
data = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
csv_file_header = 'time,control output,box humidity,box temp,outside humidity,outside temp,heater 1 temp,heater 2 temp,P,I,D,SP,Err'
start_time = time.time()
u = 0
Qss = 0 # 0% heater to start
    err = np.zeros(run_time*60)
from torchvision.transforms import ToTensor, Compose, RandomAffine
from torch.utils.data import Dataset
import pandas as pd
from os.path import join
from PIL import Image, ImageOps
import numpy as np
import random
class SketchyDB(Dataset):
""" class representing the Sketchy Database """
def __init__(self, root_dir, sketch_type='sketch', sketch_difficulties=(1, 2, 3, 4),
filter_erroneous=False, filter_context=False, filter_ambiguous=False, filter_pose=False,
train=True, split=0.8, sketch_augmentation=False, seed=1337):
"""
:param root_dir: root directory of the dataset
:param sketch_type: which sketch augmentation to use ('sketch', 'centered_scaled',
'centered_bbox', 'centered_bbox_scaled_least', 'centered_bbox_scaled_most'
or 'centered_bbox_scaled_nonuniform'):
'sketch' : sketch canvas is rendered to 256x256 such that it
undergoes the same scaling as the paired photo
'centered_scaled' : sketch is centered and uniformly scaled
such that its greatest dimension (x or y)
fills 78% of the canvas (roughly the same
as in Eitz 2012 sketch data set)
'centered_bbox' : sketch is translated such that it is
centered on the object bounding box
'centered_bbox_scaled_least' : sketch is centered on bounding box and
is uniformly scaled such that one dimension
(x or y; whichever requires the least amount
of scaling) fits within the bounding box
'centered_bbox_scaled_most' : sketch is centered on bounding box and
is uniformly scaled such that one dimension
(x or y; whichever requires the most amount
of scaling) fits within the bounding box
'centered_bbox_scaled_nonuniform' : sketch is centered on bounding box and
is non-uniformly scaled such that it
completely fits within the bounding box
:param sketch_difficulties: between 1 and 4, list of difficulties to use
:param filter_erroneous: filter sketches tagged as erroneous
:param filter_context: filter sketches tagged as containing context
:param filter_ambiguous: filter sketches tagged as ambiguous
:param filter_pose: filter sketches tagged with an incorrect pose
:param train: train or test set
:param split: train/test ratio
:param sketch_augmentation: whether to augment sketches
"""
sketch_folders = {'sketch': 'tx_000000000000',
'centered_scaled': 'tx_000100000000',
'centered_bbox': 'tx_000000000010',
'centered_bbox_scaled_least': 'tx_000000000110',
'centered_bbox_scaled_most': 'tx_000000001010',
'centered_bbox_scaled_nonuniform': 'tx_000000001110'}
self.sketch_dir = join(root_dir, 'sketch', sketch_folders[sketch_type])
self.sketch_augmentation = sketch_augmentation
if sketch_augmentation is True:
self.sketch_augmentation_transform = Compose([RandomAffine(10,
translate=(0.08, 0.08),
scale=(0.9, 1.1),
fillcolor=255,
shear=10)])
self.sketch_transform = Compose([ToTensor()])
database_dir = join(root_dir, 'info/stats.csv')
df = pd.read_csv(database_dir, usecols=['CategoryID', 'Category', 'ImageNetID', 'SketchID', 'Difficulty',
'Error?', 'Context?', 'Ambiguous?', 'WrongPose?'])
# filter data
df = df[df['Difficulty'].isin(sketch_difficulties)]
if filter_erroneous is True:
df = df[df['Error?'] == 0]
if filter_context is True:
df = df[df['Context?'] == 0]
if filter_ambiguous is True:
df = df[df['Ambiguous?'] == 0]
if filter_pose is True:
df = df[df['WrongPose?'] == 0]
self.seed = seed
self.train = train
self.data = df
self.num_samples = len(self.data.index)
self.indices = list(range(self.num_samples))
random.Random(self.seed).shuffle(self.indices)
if self.train is True:
self.indices = self.indices[:int(len(self.indices) * split)]
else:
self.indices = self.indices[int(len(self.indices) * split):]
self.num_samples = len(self.indices)
self.num_classes = df['CategoryID'].nunique()
def __len__(self):
return self.num_samples * 2 if self.train is True else self.num_samples # (*2 for flipping every single sample)
def __getitem__(self, idx):
hflip = False
if self.train:
            hflip = (idx % 2 == 0)
            idx = idx // 2
idx = self.indices[idx]
sketch_path = join(self.sketch_dir, self.data.iloc[idx]['Category'].replace(' ', '_'),
f'{self.data.iloc[idx]["ImageNetID"]}-{self.data.iloc[idx]["SketchID"]}.png')
sketch = Image.open(sketch_path).resize((224, 224))
if self.sketch_augmentation is True:
sketch = self.sketch_augmentation_transform(sketch)
if hflip is True:
sketch = sketch.transpose(Image.FLIP_LEFT_RIGHT)
sketch = ImageOps.invert(sketch)
        sketch = np.array(sketch)
#%%
"""
Created on July 05 2021
Simulation of, E(W(t)|F(s)) = W(s) using nested Monte Carlo
This code is purely educational and comes from "Financial Engineering" course by <NAME>
The course is based on the book “Mathematical Modeling and Computation
in Finance: With Exercises and Python and MATLAB Computer Codes”,
by <NAME> and <NAME>, World Scientific Publishing Europe Ltd, 2019.
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
t = 10
s = 5
NoOfPaths=1000
NoOfSteps=10
# First part to calculate E(W(t)|F(0)) = W(0) = 0
def martingaleA():
W_t = np.random.normal(0.0,pow(t,0.5),[NoOfPaths,1])
E_W_t = np.mean(W_t)
print("mean value equals to: %.2f while the expected value is W(0) =%0.2f " %(E_W_t,0.0))
# Second part requiring nested Monte Carlo simulation E(W(t)|F(s)) = W(s)
def martingaleB():
Z = np.random.normal(0.0,1.0,[NoOfPaths,NoOfSteps])
W = np.zeros([NoOfPaths,NoOfSteps+1])
# time-step from [t0,s]
dt1 = s / float(NoOfSteps)
for i in range(0,NoOfSteps):
# making sure that samples from normal have mean 0 and variance 1
Z[:,i] = (Z[:,i] - np.mean(Z[:,i])) / np.std(Z[:,i])
W[:,i+1] = W[:,i] + pow(dt1,0.5)*Z[:,i]
#W_s is the last column of W
W_s = W[:,-1]
#for every path W(s) we perform sub-simulation until time t and calculate
#the expectation
# time-step from [s,t]
dt2 = (t-s)/float(NoOfSteps);
W_t = np.zeros([NoOfPaths,NoOfSteps+1]);
#Store the results
E_W_t = np.zeros([NoOfPaths])
Error=[]
for i in range(0,NoOfPaths):
#Sub-simulation from time "s" until "t"
W_t[:,0] = W_s[i];
        Z = np.random.normal(0.0,1.0,[NoOfPaths,NoOfSteps])
# coding: utf-8
#
# This code is part of lattpy.
#
# Copyright (c) 2021, <NAME>
#
# This code is licensed under the MIT License. The copyright notice in the
# LICENSE file in the root directory and this permission notice shall
# be included in all copies or substantial portions of the Software.
"""This module contains the main `Lattice` object."""
import pickle
import logging
import warnings
import itertools
import collections
import numpy as np
from copy import deepcopy
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from typing import Union, Optional, Tuple, List, Iterator, Sequence, Callable, Any, Dict
from .utils import (
ArrayLike,
frmt_num,
SiteOccupiedError,
NoAtomsError,
NoBaseNeighborsError,
NotBuiltError
)
from .spatial import (
build_periodic_translation_vector,
vindices,
interweave,
cell_size,
cell_volume,
WignerSeitzCell,
KDTree
)
from .plotting import (
draw_points,
draw_vectors,
draw_cell,
draw_indices
)
from .unitcell import Atom
from .data import LatticeData
__all__ = ["Lattice"]
logger = logging.getLogger(__name__)
class Lattice:
"""Object representing the basis and data of a bravais lattice."""
DIST_DECIMALS: int = 6 # Decimals used for rounding distances
RVEC_TOLERANCE: float = 1e-6 # Tolerance for reciprocal vectors/lattice
def __init__(self, vectors: Union[float, Sequence[float], Sequence[Sequence[float]]],
**kwargs):
"""Initialize a new ``Lattice`` instance.
Parameters
----------
vectors: array_like or float
The vectors that span the basis of the lattice.
"""
# Vector basis
self._vectors = np.atleast_2d(vectors).T
self._vectors_inv = np.linalg.inv(self._vectors)
self._dim = len(self._vectors)
self._cell_size = cell_size(self.vectors)
self._cell_volume = cell_volume(self.vectors)
# Atom data
self._num_base = 0
self._atoms = list()
self._positions = list()
# Raw neighbor data without including connections
self._raw_base_neighbors = None
self._raw_distance_matrix = None
self._raw_num_neighbors = None
# Neighbour data
self._connections = None
self._base_neighbors = None
self._num_neighbors = None
self._distance_matrix = None
self._distances = None
# Lattice Cache
self.data = LatticeData()
self.shape = None
self.periodic_axes = list()
logger.debug("Lattice initialized (D=%i)\n"
"vectors:\n%s", self.dim, self._vectors)
@classmethod
def chain(cls, a: Optional[float] = 1.0, **kwargs) -> 'Lattice':
return cls(a, **kwargs)
@classmethod
def square(cls, a: Optional[float] = 1.0, **kwargs) -> 'Lattice':
return cls(a * np.eye(2), **kwargs)
@classmethod
def rectangular(cls, a1: Optional[float] = 1., a2: Optional[float] = 1., **kwargs) -> 'Lattice':
return cls(np.array([[a1, 0], [0, a2]]), **kwargs)
@classmethod
def hexagonal(cls, a: Optional[float] = 1.0, **kwargs) -> 'Lattice':
vectors = a / 2 * np.array([[3, np.sqrt(3)], [3, -np.sqrt(3)]])
return cls(vectors, **kwargs)
@classmethod
def oblique(cls, alpha: float, a1: Optional[float] = 1.0,
a2: Optional[float] = 1.0, **kwargs) -> 'Lattice':
vectors = np.array([[a1, 0], [a2 * np.cos(alpha), a2 * np.sin(alpha)]])
return cls(vectors, **kwargs)
@classmethod
def hexagonal3D(cls, a: Optional[float] = 1., az: Optional[float] = 1., **kwargs) -> 'Lattice': # noqa
vectors = a / 2 * np.array([[3, np.sqrt(3), 0], [3, -np.sqrt(3), 0], [0, 0, az]])
return cls(vectors, **kwargs)
@classmethod
def sc(cls, a: Optional[float] = 1.0, **kwargs) -> 'Lattice':
return cls(a * np.eye(3), **kwargs)
@classmethod
def fcc(cls, a: Optional[float] = 1.0, **kwargs) -> 'Lattice':
vectors = a/2 * np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1]])
return cls(vectors, **kwargs)
@classmethod
def bcc(cls, a: Optional[float] = 1.0, **kwargs) -> 'Lattice':
vectors = a/2 * np.array([[1, 1, 1], [1, -1, 1], [-1, 1, 1]])
return cls(vectors, **kwargs)
# ==============================================================================================
@property
def dim(self) -> int:
"""int : The dimension of the vector basis."""
return self._dim
@property
def vectors(self) -> np.ndarray:
"""np.ndarray : Array with basis vectors as rows."""
return self._vectors.T
@property
def vectors3d(self) -> np.ndarray:
"""np.ndarray : Basis vectors expanded to three dimensions."""
vectors = np.eye(3)
vectors[:self.dim, :self.dim] = self._vectors
return vectors.T
@property
def norms(self) -> np.ndarray:
"""np.ndarray : Lengths of the basis-vectors."""
return np.linalg.norm(self._vectors, axis=0)
@property
def cell_size(self) -> np.ndarray:
"""np.ndarray : The shape of the box spawned by the given vectors."""
return self._cell_size
@property
def cell_volume(self) -> float:
"""float : The volume of the unit cell defined by the primitive vectors."""
return self._cell_volume
@property
def num_base(self) -> int:
"""int : The number of atoms in the unitcell."""
return self._num_base
@property
def atoms(self) -> List[Atom]:
"""list of Atom : List of the atoms in the unitcell."""
return self._atoms
@property
def atom_positions(self) -> List[np.ndarray]:
"""list of np.ndarray : List of corresponding positions of the atoms in the unitcell."""
return self._positions
@property
def num_distances(self) -> int:
"""int : The maximal number of distances between the lattice sites."""
return int(np.max(self._connections))
@property
def num_neighbors(self) -> np.ndarray:
"""np.ndarray : The number of neighbors of each atom in the unitcell."""
return self._num_neighbors
@property
def base_neighbors(self):
"""The neighbors of the unitcell at the origin."""
return self._base_neighbors
@property
def distances(self) -> np.ndarray:
"""List of distances between the lattice sites."""
return self._distances
@property
def num_sites(self) -> int:
"""int : Number of sites in lattice data (only available if lattice has been built)."""
return self.data.num_sites
@property
def num_cells(self) -> int:
"""int : Number of unit-cells in lattice data (only available if lattice has been built)."""
return np.unique(self.data.indices[:, :-1], axis=0).shape[0]
def itransform(self, world_coords: Union[Sequence[int], Sequence[Sequence[int]]]) -> np.ndarray:
"""Transform the world-coordinates (x, y, ...) into the basis coordinates (n, m, ...)
Parameters
----------
world_coords : (..., N) array_like
Returns
-------
basis_coords : (..., N) np.ndarray
"""
world_coords = np.atleast_1d(world_coords)
return np.inner(world_coords, self._vectors_inv)
def transform(self, basis_coords: Union[Sequence[int], Sequence[Sequence[int]]]) -> np.ndarray:
""" Transform the basis-coordinates (n, m, ...) into the world coordinates (x, y, ...)
Parameters
----------
basis_coords : (..., N) array_like
Returns
-------
world_coords : (..., N) np.ndarray
"""
basis_coords = np.atleast_1d(basis_coords)
return np.inner(basis_coords, self._vectors)
def translate(self, nvec: Union[int, Sequence[int], Sequence[Sequence[int]]],
r: Optional[Union[float, Sequence[float]]] = 0.0) -> np.ndarray:
r""" Translates the given postion vector r by the translation vector n.
        The position is calculated using the translation vector :math:`n` and the
        atom position in the unitcell :math:`r`:
        .. math::
            R = \sum_i n_i v_i + r
Parameters
----------
nvec : (..., N) array_like
Translation vector in the lattice basis.
r : (N) array_like, optional
The position in cartesian coordinates. If no vector is passed only
the translation is returned.
Returns
-------
r_trans : (N) np.ndarray
"""
r = np.atleast_1d(r)
nvec = np.atleast_1d(nvec)
        return r + np.inner(nvec, self._vectors)
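    # Example (illustrative; a unit square lattice keeps the numbers easy to follow):
    # >>> latt = Lattice.square(a=1.0)           # basis vectors [1, 0] and [0, 1]
    # >>> latt.transform([2, 1])                 # basis coords (n, m) -> world coords
    # array([2., 1.])
    # >>> latt.translate([2, 1], r=[0.5, 0.5])   # R = 2*v1 + 1*v2 + r
    # array([2.5, 1.5])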
import unittest
import raocp.core.cones as core_cones
import numpy as np
class TestCones(unittest.TestCase):
__real = core_cones.Real()
__zero = core_cones.Zero()
__nonnegative_orthant = core_cones.NonnegativeOrthant()
__second_order_cone = core_cones.SecondOrderCone()
__cartesian = core_cones.Cartesian([__real, __zero, __nonnegative_orthant, __second_order_cone])
__num_samples = 100
__sample_multiplier = 10
__cone_dimension = 20
__num_test_repeats = 100
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
def test_dimension_check(self):
# cone size equals vector size
_ = core_cones._check_dimension("Real", 5, np.ones(5))
def test_dimension_check_failure(self):
# cone size does not equal vector size
with self.assertRaises(ValueError):
_ = core_cones._check_dimension("Real", 5, np.ones(6))
def test_real_project(self):
# create cone
cone_type = "Real"
real = TestCones.__real
# create point for projection
vector = np.array(TestCones.__sample_multiplier * np.random.rand(TestCones.__cone_dimension))\
.reshape((TestCones.__cone_dimension, 1))
# create points for test
samples = [None] * TestCones.__num_samples
for i in range(TestCones.__num_samples):
samples[i] = np.random.randint(-100, 100, 20) # real samples
# test real cone
self.assertEqual(cone_type, type(real).__name__)
projection = real.project(vector)
for i in range(TestCones.__num_samples):
self.assertTrue(np.inner(vector.reshape((TestCones.__cone_dimension,))
- projection.reshape((TestCones.__cone_dimension,)),
samples[i].reshape((TestCones.__cone_dimension,))
- projection.reshape((TestCones.__cone_dimension,))) <= 0)
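    # The assertions above use the variational characterization of projection onto a
    # closed convex set C: p equals the projection of v onto C if and only if
    # <v - p, s - p> <= 0 for every s in C. The random samples play the role of s,
    # so a single violated inequality would falsify the computed projection.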
def test_real_project_dual(self):
# create cone
cone_type = "Real"
real = TestCones.__real
# create point for projection
vector = np.array(TestCones.__sample_multiplier * np.random.rand(TestCones.__cone_dimension)) \
.reshape((TestCones.__cone_dimension, 1))
# create points for test
dual_samples = [None] * TestCones.__num_samples
for i in range(TestCones.__num_samples):
dual_samples[i] = np.zeros(TestCones.__cone_dimension) # real dual samples (zero)
# test real cone
self.assertEqual(cone_type, type(real).__name__)
projection_onto_dual = real.project_onto_dual(vector)
for i in range(TestCones.__num_samples):
self.assertTrue(np.inner(vector.reshape((TestCones.__cone_dimension,))
- projection_onto_dual.reshape((TestCones.__cone_dimension,)),
dual_samples[i].reshape((TestCones.__cone_dimension,))
- projection_onto_dual.reshape((TestCones.__cone_dimension,))) <= 0)
def test_zero_project(self):
# create cone
cone_type = "Zero"
zero = TestCones.__zero
# create points for projection
vector = np.array(TestCones.__sample_multiplier * np.random.rand(TestCones.__cone_dimension))\
.reshape((TestCones.__cone_dimension, 1))
samples = [None] * TestCones.__num_samples
for i in range(TestCones.__num_samples):
samples[i] = np.zeros(TestCones.__cone_dimension) # zero samples
# test zero
self.assertEqual(cone_type, type(zero).__name__)
projection = zero.project(vector)
for i in range(TestCones.__num_samples):
self.assertTrue(np.inner(vector.reshape((TestCones.__cone_dimension,))
- projection.reshape((TestCones.__cone_dimension,)),
samples[i].reshape((TestCones.__cone_dimension,))
- projection.reshape((TestCones.__cone_dimension,))) <= 0)
def test_zero_project_dual(self):
# create cone
cone_type = "Zero"
zero = TestCones.__zero
# create points for projection
vector = np.array(TestCones.__sample_multiplier * np.random.rand(TestCones.__cone_dimension))\
.reshape((TestCones.__cone_dimension, 1))
dual_samples = [None] * TestCones.__num_samples
for i in range(TestCones.__num_samples):
dual_samples[i] = np.random.randint(-100, 100, TestCones.__cone_dimension) # zero dual samples (real)
# test zero dual
self.assertEqual(cone_type, type(zero).__name__)
projection_onto_dual = zero.project_onto_dual(vector)
for i in range(TestCones.__num_samples):
self.assertTrue(np.inner(vector.reshape((TestCones.__cone_dimension,))
- projection_onto_dual.reshape((TestCones.__cone_dimension,)),
dual_samples[i].reshape((TestCones.__cone_dimension,))
- projection_onto_dual.reshape((TestCones.__cone_dimension,))) <= 0)
def test_nonnegative_orthant_project(self):
# create cone
cone_type = "NonnegativeOrthant"
nonnegative_orthant = TestCones.__nonnegative_orthant
# create points for projection
vector = np.array(TestCones.__sample_multiplier * np.random.rand(TestCones.__cone_dimension))\
.reshape((TestCones.__cone_dimension, 1))
samples = [None] * TestCones.__num_samples
for i in range(TestCones.__num_samples):
samples[i] = np.random.randint(0, 100, TestCones.__cone_dimension) # non samples
# test non
self.assertEqual(cone_type, type(nonnegative_orthant).__name__)
projection = nonnegative_orthant.project(vector)
for i in range(TestCones.__num_samples):
self.assertTrue(np.inner(vector.reshape((TestCones.__cone_dimension,))
- projection.reshape((TestCones.__cone_dimension,)),
samples[i].reshape((TestCones.__cone_dimension,))
- projection.reshape((TestCones.__cone_dimension,))) <= 0)
def test_nonnegative_orthant_project_dual(self):
# create cone
cone_type = "NonnegativeOrthant"
nonnegative_orthant = TestCones.__nonnegative_orthant
# create points for projection
        vector = np.array(TestCones.__sample_multiplier * np.random.rand(TestCones.__cone_dimension))
import numpy as np
import pandas as pd
import ml_metrics
import base64
import matplotlib.pyplot as plt
import seaborn as sns
from mlxtend.frequent_patterns import apriori
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import svds
from IPython.display import HTML
"""
redcarpet: Module for recommender systems using sets
"""
"""
HELPER METHODS
"""
def nonzero_index_set(arr):
"""
Returns a set of indices corresponding to non-zero
entries in a numpy array (or other list-like).
"""
res = set()
for i, val in enumerate(arr):
if val > 0:
res.add(i)
return res
def mat_to_sets(mat):
"""
Converts a numpy matrix into a list of sets of column
indices corresponding to non-zero row entries.
"""
return [nonzero_index_set(row) for row in mat]
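# Example (illustrative):
# >>> mat_to_sets(np.array([[0, 2, 0], [1, 0, 3]]))
# [{1}, {0, 2}]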
def get_recs(user_recs, k=None):
"""
Extracts recommended item indices, leaving out their scores.
params:
user_recs: list of lists of tuples of recommendations where
each tuple has (item index, relevance score) with the
list of tuples sorted in order of decreasing relevance
        k: maximum number of recommendations to include for each
user, if None, include all recommendations
returns:
list of lists of recommendations where each
list has the column indices of recommended items
sorted in order they appeared in user_recs
"""
recs = [[item for item, score in recs][0:k] for recs in user_recs]
return recs
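# Example (illustrative): scores are dropped and only the top-k item indices are kept.
# >>> get_recs([[(4, 0.9), (1, 0.7)], [(2, 0.8)]], k=1)
# [[4], [2]]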
def write_kaggle_recs(recs_list, filename=None, headers=["Id", "Predicted"]):
"""
Writes recommendations to file in Kaggle submission format.
params:
recs_list: list of lists of recommendations where each
list has the column indices of recommended items
sorted in order of decreasing relevance
filename: path to file for writing output
headers: list of strings of output columns, defaults to
submission columns: ["Id", "Predicted"]
returns:
int: number of non-header lines, where each line represents
a user and the recommendations given to them
"""
if filename is None:
raise ValueError("Must provide a filename.")
lines = [",".join(headers)]
for i, recs in enumerate(recs_list):
lines.append("{},{}".format(i, " ".join([str(v) for v in recs])))
text = "\n".join(lines)
with open(filename, "w") as file:
file.write(text)
return len(lines) - 1
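# Example (illustrative; "recs.csv" is an arbitrary output path). Each data line is
# "<user index>,<space-separated item indices>", e.g. "0,12 7 3" for user 0, and the
# return value counts the data lines written.
# >>> write_kaggle_recs([[12, 7, 3], [5, 1]], filename="recs.csv")
# 2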
def download_kaggle_recs(recs_list, filename=None, headers=["Id", "Predicted"]):
"""
Writes recommendations to file in Kaggle submission format.
params:
recs_list: list of lists of recommendations where each
list has the column indices of recommended items
sorted in order of decreasing relevance
filename: path to file for writing output
headers: list of strings of output columns, defaults to
submission columns: ["Id", "Predicted"]
returns:
html: HTML download link to display in a notebook, click
to download the submission file
"""
# Based on: https://www.kaggle.com/rtatman/download-a-csv-file-from-a-kernel
if filename is None:
raise ValueError("Must provide a filename.")
rec_df = pd.DataFrame(
[(i, " ".join([str(r) for r in recs])) for i, recs in enumerate(recs_list)],
columns=headers,
)
csv = rec_df.to_csv(index=False)
b64 = base64.b64encode(csv.encode())
payload = b64.decode()
html = """<a download="{filename}"
href="data:text/csv;base64,{payload}"
target="_blank">Download ({lines} lines): {filename}</a>"""
html = html.format(payload=payload, filename=filename, lines=len(rec_df))
return HTML(html)
def check_list_of_sets(s_data, var_name):
if not isinstance(s_data, list):
raise ValueError(
"{} must be a list of sets. Got: {}".format(var_name, type(s_data))
)
if len(s_data) > 0:
entry = s_data[0]
if not isinstance(entry, set):
raise ValueError(
"{} must be a list of sets. Got list of: {}".format(
var_name, type(entry)
)
)
"""
EVALUATION METRICS
"""
def mapk_score(s_hidden, recs_pred, k=10):
"""
Computes the mean average precision at k (MAP@K) of recommendations.
MAP@K = mean AP@K score over all users
AP@K = (1 / min(m, k)) * sum from 1 to k of (precision at i * relevance of ith item)
Where m is the number of items in a user's hidden set
Where k is the number of items recommended to each user
params:
s_hidden: list of sets of hidden items for each user
        recs_pred: list of lists of recommended items, with each list sorted in order of decreasing relevance
k: number of recommendations to use in top set
returns:
float, range [0, 1]
"""
check_list_of_sets(s_hidden, "s_hidden")
return ml_metrics.mapk(s_hidden, recs_pred, k)
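# Example (illustrative): the first user hits items 1 and 2 at ranks 1 and 3
# (AP@3 = (1 + 2/3) / 2), the second hits item 3 at rank 2 (AP@3 = 1/2),
# so MAP@3 is roughly 0.67.
# >>> mapk_score([{1, 2}, {3}], [[1, 5, 2], [4, 3]], k=3)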
def uhr_score(s_hidden, recs_pred, k=10):
"""
Computes the user hit rate (UHR) score of recommendations.
UHR = the fraction of users whose top list included at
least one item also in their hidden set.
params:
s_hidden: list of sets of hidden items for each user
recs_pred: list of lists of recommended items, with each list
k: number of recommendations to use in top set
returns:
float, range [0, 1]
"""
check_list_of_sets(s_hidden, "s_hidden")
if len(s_hidden) != len(recs_pred):
note = "Length of true list {} does not match length of recommended list {}."
raise ValueError(note.format(len(s_hidden), len(recs_pred)))
scores = []
for r_true, r_pred_orig in zip(s_hidden, recs_pred):
r_pred = list(r_pred_orig)[0:k]
intersect = set(r_true).intersection(set(r_pred))
scores.append(1 if len(intersect) > 0 else 0)
return np.mean(scores)
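# Example (illustrative): only the first user has a hit in their top 2, so UHR = 0.5.
# >>> uhr_score([{1, 2}, {3}], [[1, 5], [4, 6]], k=2)
# 0.5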
def get_apk_scores(s_hidden, recs_pred, k=10):
"""
Returns the average precision at k (AP@K) for each user.
AP@K = (1 / min(m, k)) * sum from 1 to k of (precision at i * relevance of ith item)
Where m is the number of items in a user's hidden set
Where k is the number of items recommended to each user
params:
s_hidden: list of sets of hidden items for each user
        recs_pred: list of lists of recommended items, with each list sorted in order of decreasing relevance
k: number of recommendations to use in top set
returns:
list of floats, each float in the range [0, 1]
"""
check_list_of_sets(s_hidden, "s_hidden")
apks = []
for r_true, r_pred in zip(s_hidden, recs_pred):
apk = mapk_score([r_true], [r_pred], k=k)
apks.append(apk)
return apks
def get_hit_counts(s_hidden, recs_pred, k=10):
"""
Returns the number of successful recommendations for each user.
params:
s_hidden: list of sets of hidden items for each user
        recs_pred: list of lists of recommended items, with each list sorted in order of decreasing relevance
k: number of recommendations to use in top set
returns:
list of integers, each integer in the range [0, k]
"""
check_list_of_sets(s_hidden, "s_hidden")
hits = []
for r_true, r_pred in zip(s_hidden, recs_pred):
ix = r_true.intersection(set(r_pred[0:k]))
hits.append(len(ix))
return hits
def get_all_scores(rec_scores, k=10):
"""
Get scores of all items in the list of lists of recommendations.
"""
all_scores = []
for recs in rec_scores:
for (item, score) in recs[0:k]:
all_scores.append(score)
return all_scores
"""
ANALYSIS TOOLS
"""
def show_apk_dist(s_hidden, models, k=10, bin_size=0.1):
"""
Plot a histogram of average precision scores for all users.
"""
bins = np.arange(0, 1 + bin_size, bin_size)
pal = sns.color_palette("hls", len(models))
for ((rec_scores, name), color) in zip(models, pal):
apks = get_apk_scores(s_hidden, get_recs(rec_scores), k=k)
sns.distplot(apks, kde=False, label=name, bins=bins, color=color)
plt.xticks(bins)
plt.xlabel("Average Precision in Top {}".format(k))
plt.ylabel("Number of Users")
plt.title("AP@K Score Distribution")
plt.gcf().set_size_inches((8, 5))
plt.grid()
plt.legend(
loc="upper left", bbox_to_anchor=(1.0, 1.0), title="Models", frameon=False
)
plt.show()
def show_hit_dist(s_hidden, models, k=10):
"""
Plot a histogram of hit counts for all users.
"""
bins = range(k + 1)
pal = sns.color_palette("hls", len(models))
for ((rec_scores, name), color) in zip(models, pal):
hits = get_hit_counts(s_hidden, get_recs(rec_scores), k=k)
sns.distplot(hits, kde=False, label=name, bins=bins, color=color)
plt.xticks(bins)
plt.xlabel("Number of Successful Recommendations in Top {}".format(k))
plt.ylabel("Number of Users")
plt.title("Hit Count Distribution")
plt.gcf().set_size_inches((8, 5))
plt.grid()
plt.legend(
loc="upper left", bbox_to_anchor=(1.0, 1.0), title="Models", frameon=False
)
plt.show()
def show_score_dist(models, k=10, bins=None):
"""
Plot a histogram of item recommendation scores for all users.
"""
pal = sns.color_palette("hls", len(models))
for ((rec_scores, name), color) in zip(models, pal):
scores = get_all_scores(rec_scores, k=k)
if bins is not None:
sns.distplot(scores, kde=False, label=name, color=color, bins=bins)
else:
sns.distplot(scores, kde=False, label=name, color=color)
if bins is not None:
plt.xticks(bins)
plt.xlabel("Score for Recommended Item in Top {}".format(k))
plt.ylabel("Number of Items")
plt.title("Item Score Distribution")
plt.gcf().set_size_inches((8, 5))
plt.grid()
plt.legend(
loc="upper left", bbox_to_anchor=(1.0, 1.0), title="Models", frameon=False
)
plt.show()
def show_user_detail(s_input, s_hidden, rec_scores, uid, name_fn=None, k=10):
"""
Show the detailed results of recommendations to a user.
"""
s_pred = get_recs(rec_scores)
print("User: {}".format(uid))
print("Given: {}".format(sorted(s_input[uid])))
print("Recommended: {}".format(sorted(s_pred[uid])))
print("Actual: {}".format(sorted(s_hidden[uid])))
set_intersect = set(s_pred[uid]).intersection(set(s_hidden[uid]))
n_intersect = len(set_intersect)
apk = mapk_score([s_hidden[uid]], [s_pred[uid]], k)
print()
print("Recommendation Hits = {}".format(n_intersect))
print("Average Precision = {0:.3f}".format(apk))
print()
print("All Recommendation Scores:")
for i, (item_id, score) in enumerate(rec_scores[uid]):
hit = "Y" if item_id in s_hidden[uid] else " "
item_name = "Item {}".format(item_id)
if name_fn is not None:
item_name = name_fn(item_id)
print(
"{0}. [{3}] ({2:.3f}) {1}".format(
str(i + 1).zfill(2), item_name, score, hit
)
)
def show_user_recs(s_hidden, rec_scores, k=10):
"""
Show a table of recommendation results by user.
"""
apks = get_apk_scores(s_hidden, get_recs(rec_scores), k=k)
hits = get_hit_counts(s_hidden, get_recs(rec_scores), k=k)
cols = ["User", "APK", "Hits"]
data = {"User": range(len(rec_scores)), "APK": apks, "Hits": hits}
return pd.DataFrame(data)[cols]
def show_item_recs(s_hidden, rec_scores, k=10):
"""
Show a table of recommendation results by item.
"""
item_res = {}
for (user, likes) in zip(rec_scores, s_hidden):
for (i, score) in user:
if i not in item_res:
item_res[i] = {"Item": i, "Results": [], "Scores": []}
item_res[i]["Results"].append(1 if i in likes else 0)
item_res[i]["Scores"].append(score)
res = []
for i in item_res:
record = item_res[i]
total = len(record["Results"])
hits = sum(record["Results"])
res.append(
{
"Item": i,
"Recommended": total,
"Hits": hits,
"Hit Rate": hits / total,
"Avg Score": | np.mean(record["Scores"]) | numpy.mean |
import random
import numpy as np
import cv2
import torch
import torch.utils.data as data
import data.util as util
import os.path as osp
class LQGT_dataset(data.Dataset):
def __init__(self, opt):
super(LQGT_dataset, self).__init__()
self.opt = opt
self.data_type = self.opt['data_type']
self.paths_LQ, self.paths_GT = None, None
self.sizes_GT, self.paths_GT = util.get_image_paths(self.data_type, opt['dataroot_GT'])
self.sizes_LQ, self.paths_LQ = util.get_image_paths(self.data_type, opt['dataroot_LQ'])
assert self.paths_GT, 'Error: GT path is empty.'
if self.paths_LQ and self.paths_GT:
assert len(self.paths_LQ) == len(
self.paths_GT
), 'GT and LQ datasets have different number of images - {}, {}.'.format(
len(self.paths_LQ), len(self.paths_GT))
self.mask_folder = opt['dataroot_mask']
def __getitem__(self, index):
GT_path, LQ_path = None, None
scale = self.opt['scale']
GT_size = self.opt['GT_size']
# get GT image
GT_path = self.paths_GT[index]
img_GT = util.read_img(None, GT_path)
# get LQ image
LQ_path = self.paths_LQ[index]
img_LQ = util.read_img(None, LQ_path)
# get mask when mask folder is not None
if self.mask_folder is not None:
mask_name = osp.basename(LQ_path)[:-4] + '.npy'
mask_path = osp.join(self.mask_folder, mask_name)
mask = util.read_npy(mask_path)
mask = np.expand_dims(mask, 2).repeat(3, axis=2)
if self.opt['phase'] == 'train':
H, W, C = img_LQ.shape
H_gt, W_gt, C = img_GT.shape
if H != H_gt:
print('*******wrong image*******:{}'.format(LQ_path))
LQ_size = GT_size // scale
# randomly crop
if GT_size is not None:
rnd_h = random.randint(0, max(0, H - LQ_size))
rnd_w = random.randint(0, max(0, W - LQ_size))
img_LQ = img_LQ[rnd_h:rnd_h + LQ_size, rnd_w:rnd_w + LQ_size, :]
rnd_h_GT, rnd_w_GT = int(rnd_h * scale), int(rnd_w * scale)
img_GT = img_GT[rnd_h_GT:rnd_h_GT + GT_size, rnd_w_GT:rnd_w_GT + GT_size, :]
# augmentation - flip, rotate
img_LQ, img_GT = util.augment([img_LQ, img_GT], self.opt['use_flip'],
self.opt['use_rot'])
# resize for alignment
H, W, C = img_LQ.shape
if H%32!=0 or W%32!=0:
H_new = int(np.ceil(H / 32) * 32)
W_new = int(np.ceil(W / 32) * 32)
img_LQ = cv2.resize(img_LQ, (W_new, H_new))
img_GT = cv2.resize(img_GT, (W_new, H_new))
if self.mask_folder is None:
r = 0.95
mask = np.max(img_LQ, 2)
mask = np.minimum(1.0, np.maximum(0, mask - r) / (1 - r))
            mask = np.expand_dims(mask, 2)
import logging
import numpy as np
from ctapipe.calib.camera import CameraCalibrator
from ctapipe.io import (
EventSource,
read_table,
)
from numba import njit
from scipy.interpolate import interp1d
from traitlets.config import Config
from lstchain.io import standard_config
from lstchain.io.config import read_configuration_file
__all__ = [
'add_noise_in_pixels',
'calculate_required_additional_nsb',
'calculate_noise_parameters',
'random_psf_smearer',
'set_numba_seed',
'tune_nsb_on_waveform',
]
log = logging.getLogger(__name__)
# number of neighbors of completely surrounded pixels of hexagonal cameras:
N_PIXEL_NEIGHBORS = 6
SMEAR_PROBABILITIES = np.full(N_PIXEL_NEIGHBORS, 1 / N_PIXEL_NEIGHBORS)
def add_noise_in_pixels(rng, image, extra_noise_in_dim_pixels,
extra_bias_in_dim_pixels, transition_charge,
extra_noise_in_bright_pixels):
"""
Addition of Poissonian noise to the pixels
Parameters
----------
rng : `numpy.random.default_rng`
Random number generator
image: `np.ndarray`
Charges (p.e.) in the camera
extra_noise_in_dim_pixels: `float`
Mean additional number of p.e. to be added (Poisson noise) to
pixels with charge below transition_charge. To be tuned by
comparing the starting MC and data
extra_bias_in_dim_pixels: `float`
Mean bias (w.r.t. original charge) of the new charge in pixels.
Should be 0 for non-peak-search pulse integrators. To be tuned by
comparing the starting MC and data
transition_charge: `float`
Border between "dim" and "bright" pixels. To be tuned by
comparing the starting MC and data
extra_noise_in_bright_pixels: `float`
Mean additional number of p.e. to be added (Poisson noise) to
pixels with charge above transition_charge. This is unbiased,
i.e. Poisson noise is introduced, and its average subtracted,
so that the mean charge in bright pixels remains unaltered.
This is because we assume that above transition_charge the
integration window is determined by the Cherenkov light, and
would not be modified by the additional NSB noise (presumably
small compared to the C-light). To be tuned by
comparing the starting MC and data
Returns
-------
image: `np.ndarray`
Modified (noisier) image
"""
bright_pixels = image > transition_charge
noise = np.where(bright_pixels, extra_noise_in_bright_pixels,
extra_noise_in_dim_pixels)
bias = np.where(bright_pixels, -extra_noise_in_bright_pixels,
extra_bias_in_dim_pixels - extra_noise_in_dim_pixels)
image = image + rng.poisson(noise) + bias
return image
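# Example (illustrative; the parameter values below are arbitrary placeholders,
# not tuned settings):
# >>> rng = np.random.default_rng(0)
# >>> image = np.array([1.0, 5.0, 120.0])   # two dim pixels, one bright pixel
# >>> add_noise_in_pixels(rng, image, extra_noise_in_dim_pixels=1.5,
# ...                     extra_bias_in_dim_pixels=0.5, transition_charge=8.0,
# ...                     extra_noise_in_bright_pixels=2.0)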
@njit(cache=True)
def set_numba_seed(seed):
np.random.seed(seed)
@njit(cache=True)
def random_psf_smearer(image, fraction, indices, indptr):
"""
Random PSF smearer
Parameters
----------
image: `np.ndarray`
Charges (p.e.) in the camera
indices : `camera_geometry.neighbor_matrix_sparse.indices`
Pixel indices.
indptr : camera_geometry.neighbor_matrix_sparse.indptr
fraction: `float`
Fraction of the light in a pixel that will be distributed among its
immediate surroundings, i.e. immediate neighboring pixels, according
to Poisson statistics. Some light is lost for pixels which are at
the camera edge and hence don't have all possible neighbors
Returns
-------
new_image: `np.ndarray`
Modified (smeared) image
"""
new_image = image.copy()
for pixel in range(len(image)):
if image[pixel] <= 0:
continue
to_smear = np.random.poisson(image[pixel] * fraction)
if to_smear == 0:
continue
# remove light from current pixel
new_image[pixel] -= to_smear
# add light to neighbor pixels
neighbors = indices[indptr[pixel]: indptr[pixel + 1]]
n_neighbors = len(neighbors)
# all neighbors are equally likely to receive the charge
# we always distribute the charge into 6 neighbors, so that charge
# on the edges of the camera is lost
neighbor_charges = np.random.multinomial(to_smear, SMEAR_PROBABILITIES)
for n in range(n_neighbors):
neighbor = neighbors[n]
new_image[neighbor] += neighbor_charges[n]
return new_image
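# Example (illustrative; `geom` stands for a ctapipe CameraGeometry, whose sparse
# neighbor matrix supplies the indices/indptr arrays expected here):
# >>> set_numba_seed(0)
# >>> smeared = random_psf_smearer(image, 0.1,
# ...                              geom.neighbor_matrix_sparse.indices,
# ...                              geom.neighbor_matrix_sparse.indptr)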
def calculate_noise_parameters(simtel_filename, data_dl1_filename,
config_filename=None):
"""
    Calculates the parameters needed to increase the noise in an MC DL1 file
    to match the noise in a real data DL1 file, using add_noise_in_pixels.
    The returned parameters are those needed by the function
    add_noise_in_pixels (see the description in its documentation above).
Parameters
----------
simtel_filename: `str`
a simtel file containing showers, from the same
production (same NSB and telescope settings) as the MC DL1 file below. It
        must contain pixel-wise info on the true number of p.e.'s from
        C-photons (used to identify pixels which only contain noise).
data_dl1_filename: `str`
a real data DL1 file (processed with calibration
settings corresponding to those with which the MC is to be processed).
It must contain calibrated images, i.e. "DL1a" data. This file has the
"target" noise which we want to have in the MC files, for better
agreement of data and simulations.
config_filename: `str`
configuration file containing the calibration
settings used for processing both the data and the MC files above
Returns
-------
extra_noise_in_dim_pixels: `float`
Extra noise of dim pixels.
extra_bias_in_dim_pixels: `float`
Extra bias of dim pixels.
extra_noise_in_bright_pixels: `float`
Extra noise of bright pixels
"""
log.setLevel(logging.INFO)
if config_filename is None:
config = standard_config
else:
config = read_configuration_file(config_filename)
# Real data DL1 tables:
data_dl1_calibration = read_table(data_dl1_filename,
'/dl1/event/telescope/monitoring/calibration')
data_dl1_pedestal = read_table(data_dl1_filename,
'/dl1/event/telescope/monitoring/pedestal')
data_dl1_parameters = read_table(data_dl1_filename,
'/dl1/event/telescope/parameters/LST_LSTCam')
data_dl1_image = read_table(data_dl1_filename,
'/dl1/event/telescope/image/LST_LSTCam')
unusable = data_dl1_calibration['unusable_pixels']
# Locate pixels with HG declared unusable either in original calibration or
# in interleaved events:
bad_pixels = unusable[0][0] # original calibration
for tf in unusable[1:][0]: # calibrations with interleaveds
bad_pixels = np.logical_or(bad_pixels, tf)
good_pixels = ~bad_pixels
# First index: 1,2,... = values from interleaveds (0 is for original
# calibration run)
# Second index: 0 = high gain
# Third index: pixels
# HG adc to pe conversion factors from interleaved calibrations:
data_HG_dc_to_pe = data_dl1_calibration['dc_to_pe'][:, 0, :]
# Pixel-wise pedestal standard deviation (for an unbiased extractor),
# in adc counts:
data_HG_ped_std = data_dl1_pedestal['charge_std'][1:, 0, :]
# indices which connect each pedestal calculation to a given calibration:
calibration_id = data_dl1_pedestal['calibration_id'][1:]
# convert pedestal st deviations to p.e.
dummy = []
    for i, x in enumerate(data_HG_ped_std):
        dummy.append(x * data_HG_dc_to_pe[calibration_id[i]])
dummy = np.array(dummy)
# Average for all interleaved calibrations (in case there are more than one)
data_HG_ped_std_pe = np.mean(dummy, axis=0) # one value per pixel
# Identify noisy pixels, likely containing stars - we want to adjust MC to
# the average diffuse NSB across the camera
data_median_std_ped_pe = np.median(data_HG_ped_std_pe)
data_std_std_ped_pe = np.std(data_HG_ped_std_pe)
log.info(f'Real data: median across camera of good pixels\' pedestal std '
f'{data_median_std_ped_pe:.3f} p.e.')
brightness_limit = data_median_std_ped_pe + 3 * data_std_std_ped_pe
too_bright_pixels = (data_HG_ped_std_pe > brightness_limit)
log.info(f'Number of pixels beyond 3 std dev of median: '
f'{too_bright_pixels.sum()}, (above {brightness_limit:.2f} p.e.)')
ped_mask = data_dl1_parameters['event_type'] == 2
# The charges in the images below are obtained with the extractor for
# showers, usually a biased one, like e.g. LocalPeakWindowSum
data_ped_charges = data_dl1_image['image'][ped_mask]
# Exclude too bright pixels, besides those with unusable calibration:
good_pixels &= ~too_bright_pixels
# recalculate the median of the pixels' std dev, with good_pixels:
data_median_std_ped_pe = np.median(data_HG_ped_std_pe[good_pixels])
log.info(f'Good and not too bright pixels: {good_pixels.sum()}')
# all_good is an events*pixels boolean array of valid signals:
all_good = np.reshape(np.tile(good_pixels, data_ped_charges.shape[0]),
data_ped_charges.shape)
# histogram of pedestal charges (biased extractor) from good and not noisy
# pixels:
qbins = 100
qrange = (-10, 15)
dataq = np.histogram(data_ped_charges[all_good].flatten(), bins=qbins,
range=qrange, density=True)
# Find the peak of the pedestal biased charge distribution of real data.
# Use an interpolated version of the histogram, for robustness:
func = interp1d(0.5*(dataq[1][1:]+dataq[1][:-1]), dataq[0],
kind='quadratic', fill_value='extrapolate')
xx = np.linspace(qrange[0], qrange[1], 100*qbins)
mode_data = xx[np.argmax(func(xx))]
# Event reader for simtel file:
mc_reader = EventSource(input_url=simtel_filename, config=Config(config))
# Obtain the configuration with which the pedestal calculations were
# performed:
ped_config = config['LSTCalibrationCalculator']['PedestalIntegrator']
tel_id = ped_config['tel_id']
# Obtain the (unbiased) extractor used for pedestal calculations:
pedestal_extractor_type = ped_config['charge_product']
pedestal_calibrator = CameraCalibrator(
image_extractor_type=pedestal_extractor_type,
config=Config(ped_config),
subarray=mc_reader.subarray
)
# Obtain the (usually biased) extractor used for shower images:
shower_extractor_type = config['image_extractor']
shower_calibrator = CameraCalibrator(
image_extractor_type=shower_extractor_type,
config=Config(config),
subarray=mc_reader.subarray
)
    # Since these extractors are now for use on MC, we have to apply the pulse
    # integration correction (in data this is currently, as of lstchain
    # v0.7.5, replaced by an empirical, hard-coded correction of the
    # adc to pe conversion factors).
pedestal_calibrator.image_extractors[ped_config['charge_product']].apply_integration_correction = True
shower_calibrator.image_extractors[shower_extractor_type].apply_integration_correction = True
# Pulse integration window width of the (biased) extractor for showers:
shower_extractor_window_width = config[config['image_extractor']]['window_width']
# Pulse integration window width for the pedestal estimation:
pedestal_extractor_config = ped_config[pedestal_extractor_type]
pedestal_extractor_window_width = pedestal_extractor_config['window_width']
# MC pedestals integrated with the unbiased pedestal extractor
mc_ped_charges = []
# MC pedestals integrated with the biased shower extractor
mc_ped_charges_biased = []
for event in mc_reader:
if tel_id not in event.trigger.tels_with_trigger:
continue
# Extract the signals as we do for pedestals (unbiased fixed window
# extractor):
pedestal_calibrator(event)
charges = event.dl1.tel[tel_id].image
# True number of pe's from Cherenkov photons (to identify noise-only pixels)
true_image = event.simulation.tel[tel_id].true_image
mc_ped_charges.append(charges[true_image == 0])
# Now extract the signal as we would do for shower events (usually
# with a biased extractor, e.g. LocalPeakWindowSum):
shower_calibrator(event)
charges_biased = event.dl1.tel[tel_id].image
mc_ped_charges_biased.append(charges_biased[true_image == 0])
# All pixels behave (for now) in the same way in MC, just put them together
mc_ped_charges = np.concatenate(mc_ped_charges)
mc_ped_charges_biased = np.concatenate(mc_ped_charges_biased)
mcq = np.histogram(mc_ped_charges_biased, bins=qbins, range=qrange,
density=True)
# Find the peak of the pedestal biased charge distribution of MC. Use
# an interpolated version of the histogram, for robustness:
func = interp1d(0.5*(mcq[1][1:]+mcq[1][:-1]), mcq[0],
kind='quadratic', fill_value='extrapolate')
xx = np.linspace(qrange[0], qrange[1], 100*qbins)
mode_mc = xx[np.argmax(func(xx))]
mc_unbiased_std_ped_pe = np.std(mc_ped_charges)
# Find the additional noise (in data w.r.t. MC) for the unbiased extractor,
# and scale it to the width of the window for integration of shower images.
# The idea is that when a strong signal is present, the biased extractor
# will integrate around it, and the additional noise is unbiased because
# it won't modify the integration range.
extra_noise_in_bright_pixels = \
((data_median_std_ped_pe**2 - mc_unbiased_std_ped_pe**2) *
shower_extractor_window_width / pedestal_extractor_window_width)
    # Just in case, make sure we only add noise if the MC noise is smaller
    # than the real data's:
extra_noise_in_bright_pixels = max(0., extra_noise_in_bright_pixels)
bias = mode_data - mode_mc
extra_bias_in_dim_pixels = max(bias, 0)
# differences of values to peak charge:
dq = data_ped_charges[all_good].flatten() - mode_data
dqmc = mc_ped_charges_biased - mode_mc
# maximum distance (in pe) from peak, to avoid strong impact of outliers:
maxq = 10
# calculate widening of the noise bump:
added_noise = (np.sum(dq[dq<maxq]**2)/len(dq[dq<maxq]) -
np.sum(dqmc[dqmc<maxq]**2)/len(dqmc[dqmc < maxq]))
added_noise = (max(0, added_noise))**0.5
extra_noise_in_dim_pixels = added_noise
return extra_noise_in_dim_pixels, extra_bias_in_dim_pixels, \
extra_noise_in_bright_pixels
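# Minimal usage sketch tying calculate_noise_parameters to add_noise_in_pixels
# (illustrative; the file names and transition_charge are user-chosen
# placeholders, not values computed by this function):
#
#   dim_noise, dim_bias, bright_noise = calculate_noise_parameters(
#       "gamma_run.simtel.gz", "dl1_real_data.h5", "lstchain_config.json")
#   rng = np.random.default_rng()
#   noisy = add_noise_in_pixels(rng, image, dim_noise, dim_bias,
#                               transition_charge=8.0,
#                               extra_noise_in_bright_pixels=bright_noise)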
def tune_nsb_on_waveform(waveform, added_nsb_fraction, original_nsb,
dt, pulse_templates, gain, charge_spe_cumulative_pdf):
"""
Inject single photon pulses in existing R1 waveforms to increase NSB.
Parameters
----------
waveform: charge (p.e. / ns) in each pixel and sampled time
added_nsb_fraction: fraction of the original NSB in simulation to be added
original_nsb: original NSB rate (astropy unit Hz)
dt: time between waveform samples (astropy unit s)
pulse_templates: `lstchain.data.NormalizedPulseTemplate` containing
the single p.e. pulse template used for the injection
gain: gain channel identifier for each pixel
charge_spe_cumulative_pdf: `scipy.interpolate.interp1d` Single p.e. gain
fluctuation cumulative pdf used to randomise the normalisation of
injected pulses
"""
n_pixels, n_samples = waveform.shape
duration = (20 + n_samples) * dt # TODO check needed time window, effect of edges
t = np.arange(-20, n_samples) * dt.value
mean_added_nsb = (added_nsb_fraction * original_nsb) * duration
rng = np.random.default_rng()
additional_nsb = rng.poisson(mean_added_nsb, n_pixels)
added_nsb_time = rng.uniform(-20 * dt.value, -20 * dt.value + duration.value, (n_pixels, max(additional_nsb)))
added_nsb_amp = charge_spe_cumulative_pdf(rng.uniform(size=(n_pixels, max(additional_nsb))))
baseline_correction = (added_nsb_fraction * original_nsb * dt).value
waveform -= baseline_correction
for i in range(n_pixels):
for j in range(additional_nsb[i]):
waveform[i] += (added_nsb_amp[i][j]
* (pulse_templates(t[20:] - added_nsb_time[i][j], 'HG' if gain[i] else 'LG')))
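# Note: tune_nsb_on_waveform returns nothing; the input `waveform` array is
# modified in place (baseline shift subtracted, single-p.e. pulses added).
# Minimal call sketch (illustrative; assumes `from astropy import units as u`
# and that the remaining arguments are prepared by the caller):
#
#   tune_nsb_on_waveform(waveform, added_nsb_fraction=0.5,
#                        original_nsb=0.25 * u.GHz, dt=1 * u.ns,
#                        pulse_templates=pulse_templates, gain=gain,
#                        charge_spe_cumulative_pdf=charge_spe_cumulative_pdf)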
def calculate_required_additional_nsb(simtel_filename, data_dl1_filename, config=None):
# TODO check if good estimation
# TODO reduce duplicated code with 'calculate_noise_parameters'
"""
Calculates the additional NSB needed in the MC waveforms
to match a real data DL1 file
Parameters
----------
simtel_filename: a simtel file containing showers, from the production
(same NSB and telescope settings) as the one on which the correction will
be applied. It must contain pixel-wise info on true number of p.e.'s from
C-photons (will be used to identify pixels which only contain noise).
data_dl1_filename: a real data DL1 file (processed with calibration
settings corresponding to those with which the MC is to be processed).
It must contain calibrated images, i.e. "DL1a" data. This file has the
"target" NSB which we want to have in the MC files, for better
agreement of data and simulations.
config: configuration containing the calibration
settings used for processing both the data and the MC files above
Returns
-------
extra_nsb: Fraction of the additional NSB in data compared to MC.
data_ped_variance: Pedestal variance from data
mc_ped_variance: Pedestal variance from MC
"""
log.setLevel(logging.INFO)
if config is None:
config = standard_config
# Real data DL1 tables:
data_dl1_calibration = read_table(data_dl1_filename,
'/dl1/event/telescope/monitoring/calibration')
data_dl1_pedestal = read_table(data_dl1_filename,
'/dl1/event/telescope/monitoring/pedestal')
unusable = data_dl1_calibration['unusable_pixels']
# Locate pixels with HG declared unusable either in original calibration or
# in interleaved events:
bad_pixels = unusable[0][0] # original calibration
for tf in unusable[1:][0]: # calibrations with interleaved
bad_pixels = np.logical_or(bad_pixels, tf)
good_pixels = ~bad_pixels
# First index: 1,2,... = values from interleaved (0 is for original
# calibration run)
# Second index: 0 = high gain
# Third index: pixels
# HG adc to pe conversion factors from interleaved calibrations:
data_HG_dc_to_pe = data_dl1_calibration['dc_to_pe'][:, 0, :]
# Pixel-wise pedestal standard deviation (for an unbiased extractor),
# in adc counts:
data_HG_ped_std = data_dl1_pedestal['charge_std'][1:, 0, :]
# indices which connect each pedestal calculation to a given calibration:
calibration_id = data_dl1_pedestal['calibration_id'][1:]
# convert pedestal st deviations to p.e.
dummy = []
    for i, x in enumerate(data_HG_ped_std):
        dummy.append(x * data_HG_dc_to_pe[calibration_id[i]])
dummy = np.array(dummy)
# Average for all interleaved calibrations (in case there are more than one)
data_HG_ped_std_pe = np.mean(dummy, axis=0) # one value per pixel
# Identify noisy pixels, likely containing stars - we want to adjust MC to
# the average diffuse NSB across the camera
data_median_std_ped_pe = | np.median(data_HG_ped_std_pe) | numpy.median |
'''
------------------------------------------------------------------------
Last updated 7/13/2015
Functions for generating omega, the T x S array which describes the
demographics of the population
This py-file calls the following other file(s):
utils.py
data\demographic\demographic_data.csv
data\demographic\mortality_rates.csv
This py-file creates the following other file(s):
(make sure that an OUTPUT folder exists)
OUTPUT/fert_rates.png
OUTPUT/mort_rates.png
OUTPUT/survival_rate.png
OUTPUT/cum_mort_rate.png
OUTPUT/imm_rates.png
OUTPUT/Population.png
OUTPUT/Population_growthrate.png
OUTPUT/omega_init.png
OUTPUT/omega_ss.png
------------------------------------------------------------------------
'''
'''
------------------------------------------------------------------------
Packages
------------------------------------------------------------------------
'''
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import numpy.polynomial.polynomial as poly
import scipy.optimize as opt
import utils
cur_path = os.path.split(os.path.abspath(__file__))[0]
DEMO_DIR = os.path.join(cur_path, "data", "demographic")
pd.options.mode.chained_assignment = None
'''
------------------------------------------------------------------------
Import data sets
------------------------------------------------------------------------
Population data:
Obtained from:
Annual Estimates of the Resident Population by Single Year of
Age and Sex: April 1, 2010 to July 1, 2013
(Both sexes)
National Characteristics, Vintage 2013
US Census Bureau
http://www.census.gov/popest/data/national/asrh/2013/index.html
Mortality rates data:
Obtained from:
Male and Female death probabilities
Actuarial Life table, 2010
Social Security Administration
http://www.ssa.gov/oact/STATS/table4c6.html
Fertility rates data:
Obtained from:
Births and birth rates, by age of mother, US, 2010
National Vital Statistics Reports, CDC
http://www.cdc.gov/nchs/data/nvsr/nvsr60/nvsr60_02.pdf
Since rates are per 1000 women, the data is divided by 1000
------------------------------------------------------------------------
'''
# Population data
demo_file = utils.read_file(cur_path, "data/demographic/demographic_data.csv")
data = pd.read_table(demo_file, sep=',', header=0)
data = data.set_index('Age')
# Remove commas in the data
for index, value in enumerate(data['2010']):
data['2010'][index] = int(value.replace(',', ''))
for index, value in enumerate(data['2011']):
data['2011'][index] = int(value.replace(',', ''))
for index, value in enumerate(data['2012']):
data['2012'][index] = int(value.replace(',', ''))
for index, value in enumerate(data['2013']):
data['2013'][index] = int(value.replace(',', ''))
# Create a copy of the data to be used elsewhere, without changing the
# main data
data_raw = data.copy(deep=True)
# Mortality rates data
#mort_data = pd.read_table(os.path.join(DEMO_DIR, 'mortality_rates.csv'), sep=',')
mort_file = utils.read_file(cur_path, "data/demographic/mortality_rates.csv")
mort_data = pd.read_table(mort_file, sep=',')
# Remove commas in the data
for index, value in enumerate(mort_data['male_weight']):
mort_data['male_weight'][index] = float(value.replace(',', ''))
for index, value in enumerate(mort_data['female_weight']):
mort_data['female_weight'][index] = float(value.replace(',', ''))
# Average male and female death rates
mort_data['mort_rate'] = (
(np.array(mort_data.male_death.values).astype(float) * np.array(
mort_data.male_weight.values).astype(float)) + (np.array(
mort_data.female_death.values).astype(float) * np.array(
mort_data.female_weight.values).astype(float))) / (
np.array(mort_data.male_weight.values).astype(float) + np.array(
mort_data.female_weight.values).astype(float))
mort_data = mort_data[mort_data.mort_rate.values < 1]
del mort_data['male_death'], mort_data[
'female_death'], mort_data['male_weight'], mort_data[
'female_weight'], mort_data['male_expectancy'], mort_data[
'female_expectancy']
# As the data gives the probability of death, one minus the rate will
# give the survival rate
mort_data['surv_rate'] = 1 - mort_data.mort_rate
# Create an array of death rates of children
# Fertility rates data
fert_data = np.array(
[.4, 34.3, 17.3, 58.3, 90.0, 108.3, 96.6, 45.9, 10.2, .7]) / 1000
# Fertility rates are given in age groups of 5 years, so the following
# are the midpoints of those groups
age_midpoint = np.array([12, 17, 16, 18.5, 22, 27, 32, 37, 42, 49.5])
'''
------------------------------------------------------------------------
Define functions
------------------------------------------------------------------------
'''
def fit_exp_right(params, point1, point2):
# Fit exponentials to two points for right tail of distributions
a, b = params
x1, y1 = point1
x2, y2 = point2
error1 = a*b**(-x1) - y1
error2 = a*b**(-x2) - y2
return [error1, error2]
def fit_exp_left(params, point1, point2):
# Fit exponentials to two points for left tail of distributions
a, b = params
x1, y1 = point1
x2, y2 = point2
error1 = a*b**(x1) - y1
error2 = a*b**(x2) - y2
return [error1, error2]
def exp_int(points, a, b):
top = a * ((1.0/(b**40)) - b**(-points))
bottom = np.log(b)
return top / bottom
def integrate(func, points):
params_guess = [1, 1]
a, b = opt.fsolve(fit_exp_right, params_guess, args=(
[40, poly.polyval(40, func)], [49.5, .0007]))
func_int = poly.polyint(func)
integral = np.empty(points.shape)
integral[points <= 40] = poly.polyval(points[points <= 40], func_int)
integral[points > 40] = poly.polyval(40, func_int) + exp_int(
points[points > 40], a, b)
return np.diff(integral)
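# Note on integrate(): the fitted polynomial `func` is integrated analytically
# up to age 40; beyond 40 an exponential tail a*b**(-x), fitted through the
# polynomial value at age 40 and the point (49.5, .0007), is integrated
# instead (via exp_int). np.diff of the cumulative integral then gives the
# per-bin rates returned to the caller.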
'''
------------------------------------------------------------------------
Survival Rates
------------------------------------------------------------------------
'''
def get_survival(S, starting_age, ending_age, E):
'''
Parameters:
S - Number of age cohorts (scalar)
starting_age = initial age of cohorts (scalar)
ending_age = ending age of cohorts (scalar)
E = number of children (scalar)
Returns:
surv_array - S x 1 array of survival rates for each age cohort
children_rate - starting_age x 1 array of survival
rates for children
'''
mort_rate = np.array(mort_data.mort_rate)
mort_poly = poly.polyfit(np.arange(mort_rate.shape[0]), mort_rate, deg=18)
mort_int = poly.polyint(mort_poly)
child_rate = poly.polyval(np.linspace(0, starting_age, E+1), mort_int)
child_rate = np.diff(child_rate)
mort_rate = poly.polyval(
np.linspace(starting_age, ending_age, S+1), mort_int)
mort_rate = np.diff(mort_rate)
child_rate[child_rate < 0] = 0.0
mort_rate[mort_rate < 0] = 0.0
return 1.0 - mort_rate, 1.0 - child_rate
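# Minimal usage sketch for get_survival (the cohort count and age bounds are
# assumed example values, not calibrated settings):
#
#   surv_array, children_rate = get_survival(S=80, starting_age=20,
#                                            ending_age=100, E=20)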
'''
------------------------------------------------------------------------
Immigration Rates
------------------------------------------------------------------------
'''
pop_2010, pop_2011, pop_2012, pop_2013 = np.array(
data_raw['2010'], dtype='f'), np.array(
data_raw['2011'], dtype='f'), np.array(
data_raw['2012'], dtype='f'), np.array(
data_raw['2013'], dtype='f')
def get_immigration1(S, starting_age, ending_age, pop_2010, pop_2011, E):
'''
Parameters:
S - Number of age cohorts
starting_age - initial age of cohorts
pop1 - initial population
pop2 - population one year later
Returns:
im_array - S+E x 1 array of immigration rates for each
age cohort
'''
# Get survival rates for the S age groups
surv_array, children_rate = get_survival(
ending_age-starting_age, starting_age, ending_age, starting_age)
surv_array = np.array(list(children_rate) + list(surv_array))
# Only keep track of individuals in 2010 that don't die
pop_2010 = pop_2010[:ending_age]
# In 2011, individuals will have aged one year
pop_2011 = pop_2011[1:ending_age+1]
# The immigration rate will be 1 plus the percent change in
# population (since death has already been accounted for)
perc_change = ((pop_2011 - pop_2010) / pop_2010)
# Remove the last entry, since individuals in the last period will die
im_array = perc_change - (surv_array - 1)
return im_array
def get_immigration2(S, starting_age, ending_age, E):
'''
Parameters:
S - Number of age cohorts
starting age - initial age of cohorts
Returns:
im_array - S x 1 array of immigration rates for each
age cohort
child_imm_rate - starting_age x 1 array of immigration
rates for children
'''
imm_rate_condensed1 = get_immigration1(
S, starting_age, ending_age, pop_2010, pop_2011, E)
imm_rate_condensed2 = get_immigration1(
S, starting_age, ending_age, pop_2011, pop_2012, E)
imm_rate_condensed3 = get_immigration1(
S, starting_age, ending_age, pop_2012, pop_2013, E)
im_array = (
imm_rate_condensed1 + imm_rate_condensed2 + imm_rate_condensed3) / 3.0
poly_imm = poly.polyfit(np.linspace(
1, ending_age, ending_age-1), im_array[:-1], deg=18)
poly_imm_int = poly.polyint(poly_imm)
child_imm_rate = poly.polyval(np.linspace(
0, starting_age, E+1), poly_imm_int)
imm_rate = poly.polyval(np.linspace(
starting_age, ending_age, S+1), poly_imm_int)
child_imm_rate = np.diff(child_imm_rate)
imm_rate = np.diff(imm_rate)
imm_rate[-1] = 0.0
return imm_rate, child_imm_rate
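# Note on get_immigration2(): the residual immigration rates implied by the
# 2010-2011, 2011-2012 and 2012-2013 population changes (via get_immigration1)
# are averaged, fit with a degree-18 polynomial, and integrated over the child
# and adult age bins; the last adult entry is then forced to zero.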
'''
------------------------------------------------------------------------
Fertility Rates
------------------------------------------------------------------------
'''
def get_fert(S, starting_age, ending_age, E):
'''
Parameters:
S - Number of age cohorts
starting age - initial age of cohorts
Returns:
fert_rate - Sx1 array of fertility rates for each
age cohort
children_fertrate - starting_age x 1 array of zeros, to be
used in get_omega()
'''
# Fit a polynomial to the fertility rates
poly_fert = poly.polyfit(age_midpoint, fert_data, deg=4)
fert_rate = integrate(poly_fert, np.linspace(
starting_age, ending_age, S+1))
fert_rate /= 2.0
children_fertrate_int = poly.polyint(poly_fert)
children_fertrate_int = poly.polyval(np.linspace(
0, starting_age, E + 1), children_fertrate_int)
children_fertrate = np.diff(children_fertrate_int)
children_fertrate /= 2.0
children_fertrate[children_fertrate < 0] = 0
children_fertrate[:int(10*S/float(ending_age-starting_age))] = 0
return fert_rate, children_fertrate
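# Minimal usage sketch for get_fert (same assumed example ages and cohort
# counts as in the get_survival sketch above):
#
#   fert_rate, children_fertrate = get_fert(S=80, starting_age=20,
#                                           ending_age=100, E=20)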
'''
------------------------------------------------------------------------
Generate graphs of mortality, fertility, and immigration rates
------------------------------------------------------------------------
'''
def rate_graphs(S, starting_age, ending_age, imm, fert, surv, child_imm,
child_fert, child_mort, output_dir="./OUTPUT"):
domain = np.arange(child_fert.shape[0] + S) + 1
mort = mort_data.mort_rate
domain2 = np.arange(mort.shape[0]) + 1
domain4 = np.arange(child_imm.shape[0] + imm.shape[0]) + 1
# Graph of fertility rates
plt.figure()
plt.plot(
domain, list(child_fert)+list(fert), linewidth=2, color='blue')
plt.xlabel(r'age $s$')
plt.ylabel(r'fertility $f_s$')
fert_rates = os.path.join(output_dir, "Demographics/fert_rates")
plt.savefig(fert_rates)
# Graph of mortality rates
plt.figure()
plt.plot(domain2[:ending_age-1], (1-np.array(list(child_mort)+list(surv)))[:-1], color='blue', linewidth=2)
plt.plot(domain2[ending_age:], mort[
ending_age:], color='blue', linestyle='--', linewidth=2)
plt.axvline(x=ending_age, color='red', linestyle='-', linewidth=1)
plt.xlabel(r'age $s$')
plt.ylabel(r'mortality $\rho_s$')
mort_rates = os.path.join(output_dir, "Demographics/mort_rates")
plt.savefig(mort_rates)
cum_surv_arr = np.cumprod(surv)
domain3 = np.arange(surv.shape[0]) + 1
# Graph of cumulative mortality rates
plt.figure()
plt.plot(domain3, cum_surv_arr)
plt.xlabel(r'age $s$')
plt.ylabel(r'survival rate $1-\rho_s$')
surv_rates = os.path.join(output_dir, "Demographics/survival_rates")
plt.savefig(surv_rates)
cum_mort_rate = 1-cum_surv_arr
plt.figure()
plt.plot(domain3, cum_mort_rate)
plt.xlabel(r'age $s$')
plt.ylabel(r'cumulative mortality rate')
cum_mort_rates = os.path.join(output_dir, "Demographics/cum_mort_rate")
plt.savefig(cum_mort_rates)
# Graph of immigration rates
plt.figure()
plt.plot(domain4, list(
child_imm)+list(imm), linewidth=2, color='blue')
plt.xlabel(r'age $s$')
plt.ylabel(r'immigration $i_s$')
imm_rates = os.path.join(output_dir, "Demographics/imm_rates")
plt.savefig(imm_rates)
'''
------------------------------------------------------------------------
Generate graphs of Population
------------------------------------------------------------------------
'''
def pop_graphs(S, T, starting_age, ending_age, children, g_n, omega,
output_dir="./OUTPUT"):
N = omega[T].sum() + children[T].sum()
x = children.sum(1) + omega.sum(1)
x2 = 100 * np.diff(x)/x[:-1]
plt.figure()
plt.plot(np.arange(T+S)+1, x, 'b', linewidth=2)
plt.title('Population Size (as a percent of the initial population)')
plt.xlabel(r'Time $t$')
# plt.ylabel('Population size, as a percent of initial population')
    pop = os.path.join(output_dir, "Demographics/Population")
plt.savefig(pop)
plt.figure()
plt.plot(np.arange(T+S-1)+1, x2, 'b', linewidth=2)
plt.axhline(y=100 * g_n, color='r', linestyle='--', label=r'$\bar{g}_n$')
plt.legend(loc=0)
plt.xlabel(r'Time $t$')
plt.ylabel(r'Population growth rate $g_n$')
# plt.title('Population Growth rate over time')
pop_growth = os.path.join(output_dir, "Demographics/Population_growthrate")
plt.savefig(pop_growth)
plt.figure()
plt.plot(np.arange(S+int(starting_age * S / (
ending_age-starting_age)))+1, list(
children[0, :]) + list(
omega[0, :]), linewidth=2, color='blue')
plt.xlabel(r'age $s$')
plt.ylabel(r'$\omega_{s,1}$')
omega_init = os.path.join(output_dir, "Demographics/omega_init")
plt.savefig(omega_init)
plt.figure()
plt.plot(np.arange(S+int(starting_age * S / (
ending_age-starting_age)))+1, list(
children[T, :]/N) + list(
omega[T, :]/N), linewidth=2, color='blue')
plt.xlabel(r'age $s$')
plt.ylabel(r'$\overline{\omega}$')
omega_ss = os.path.join(output_dir, "Demographics/omega_ss")
plt.savefig(omega_ss)
'''
------------------------------------------------------------------------
Generate Demographics
------------------------------------------------------------------------
'''
def get_omega(S, T, starting_age, ending_age, E, flag_graphs):
'''
Inputs:
S - Number of age cohorts (scalar)
T - number of time periods in TPI (scalar)
starting_age - initial age of cohorts (scalar)
ending_age = ending age of cohorts (scalar)
E = number of children (scalar)
flag_graphs = graph variables or not (bool)
Outputs:
omega_big = array of all population weights over time ((T+S)x1 array)
g_n_SS = steady state growth rate (scalar)
omega_SS = steady state population weights (Sx1 array)
surv_array = survival rates (Sx1 array)
rho = mortality rates (Sx1 array)
g_n_vec = population growth rate over time ((T+S)x1 array)
'''
data1 = data
pop_data = np.array(data1['2010'])
poly_pop = poly.polyfit(np.linspace(
0, pop_data.shape[0]-1, pop_data.shape[0]), pop_data, deg=11)
poly_int_pop = poly.polyint(poly_pop)
pop_int = poly.polyval(np.linspace(
starting_age, ending_age, S+1), poly_int_pop)
new_omega = pop_int[1:]-pop_int[:-1]
surv_array, children_rate = get_survival(S, starting_age, ending_age, E)
surv_array[-1] = 0.0
imm_array, children_im = get_immigration2(S, starting_age, ending_age, E)
#imm_array *= 0.0
#children_im *= 0.0
fert_rate, children_fertrate = get_fert(S, starting_age, ending_age, E)
cum_surv_rate = | np.cumprod(surv_array) | numpy.cumprod |
from mikkel_tools.MiClass import MiClass
import mikkel_tools.utility as mt_util
import matplotlib.pyplot as plt
import pyshtools
import scipy.linalg as spl
import pickle
import numpy as np
import mikkel_tools.GMT_tools as gt
import os
#import utility as sds_util
class SDSS(MiClass):
""" Class for performing spherical direct sequential simulation """
def __init__(self, comment, N_SH = 60, sim_type = "core", sat_height = 350, N_SH_secondary = None):
super().__init__(sat_height = sat_height)
self.comment = comment
self.class_abs_path = os.path.dirname(__file__)
# Initial constants related to spherical harmonics and Earth system size.
self.N_SH = N_SH
self.N_SH_secondary = N_SH_secondary
self.sim_type = sim_type
def make_grid(self, r_grid, grid, calc_sph_d = False, N_grid = 1000):
# Initialize
self.r_grid = r_grid
self.grid = grid
self.sph_d = None
# Generate equal area grid
if isinstance(grid,str):
self.N_grid = N_grid
N_grid_orig = self.N_grid
check_flag = False
if grid == "equal_area":
while check_flag is False:
points_polar = mt_util.eq_point_set_polar(self.N_grid) # Compute grid with equal area grid functions
# Set lat and lon from estimated grid
self.lon = points_polar[:,0]*180/np.pi
self.lat = 90 - points_polar[:,1]*180/np.pi
# Determine equal area grid specifics used for defining the integration area
s_cap, n_regions = mt_util.eq_caps(self.N_grid)
self.n_regions = n_regions.T
self.s_cap = s_cap
if self.N_grid == int(np.sum(n_regions)):
check_flag = True
if N_grid_orig - self.N_grid != 0:
print("")
print("___ CHANGES TO GRID ___")
print("N = {}, not compatible for equal area grid".format(N_grid_orig))
print("N has been set to {}".format(self.N_grid))
else:
self.N_grid -= 1
self.handle_poles()
# Generate Gauss-Legendre quadrature grid
elif grid == "gauss_leg":
self.gauss_leg_n_from_N = int(np.ceil(np.sqrt(self.N_grid/2))) # Approximate required Gauss-Legendre grid size from defined N_grid
gauss_leg = np.polynomial.legendre.leggauss(self.gauss_leg_n_from_N) # Use built-in numpy function to generate grid
# Set lat and lon range from estimated grid
lat = 90-np.flipud(np.arccos(gauss_leg[0]).reshape(-1,1))*180/np.pi
lon = np.arange(0,2*np.pi,np.pi/self.gauss_leg_n_from_N)*180/np.pi
            weights, _ = np.meshgrid(gauss_leg[1],lon,indexing='ij') # Get weights for quadrature on grid
self.weights = np.ravel(weights)
# Compute full lat/lon grid
lat, lon = np.meshgrid(lat,lon,indexing='ij')
self.lon = lon.ravel()
self.lat = lat.ravel()
self.N_grid = 2*self.gauss_leg_n_from_N**2 # Update N_grid
# Generate Lebedev quadrature grid
elif grid == "lebedev":
import quadpy
# Lebedev grid generation from quadpy is limited to the following two choices
if self.N_grid >= 5000:
scheme = quadpy.sphere.lebedev_131()
else:
scheme = quadpy.sphere.lebedev_059()
# Set lat and lon from estimated grid
coords = scheme.azimuthal_polar
self.lon = 180+coords[:,0]*180/np.pi
self.lat = 90-coords[:,1]*180/np.pi
self.weights = np.ravel(scheme.weights) # Get weights for quadrature on grid
self.N_grid = len(self.weights) # Update N_grid according to Lebedev grid
else:
self.lon = grid[:,0]
self.lat = grid[:,1]
self.N_grid = len(self.lon)
# Compute spherical distances between all points on grid if required
if calc_sph_d is True:
lon_mesh, lat_mesh = np.meshgrid(self.lon, self.lat, indexing='ij')
self.sph_d = mt_util.haversine(self.r_grid, lon_mesh, lat_mesh, lon_mesh.T, lat_mesh.T)
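    # Minimal usage sketch for make_grid (illustrative; the constructor
    # arguments are example values and self.r_cmb is assumed to be set by the
    # MiClass parent):
    #
    #   sdss = SDSS("test run", N_SH=60, sim_type="core")
    #   sdss.make_grid(sdss.r_cmb, "equal_area", calc_sph_d=True, N_grid=1000)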
def handle_poles(self):
import numpy as np
# Remove the first and last grid points (the poles) and the corresponding structure related components
idx_end_core = self.N_grid-1
self.lat = np.delete(self.lat,[0,idx_end_core],0)
self.lon = np.delete(self.lon,[0,idx_end_core],0)
self.N_grid = idx_end_core-1
self.n_regions = np.delete(self.n_regions,-1,1)
self.n_regions = np.delete(self.n_regions,0,1)
self.s_cap = np.delete(self.s_cap,-1,0)
self.s_cap = np.delete(self.s_cap,0,0)
self.N_grid = idx_end_core-1
if self.sph_d is not None:
self.sph_d = np.delete(self.sph_d,[0,idx_end_core],0)
self.sph_d = np.delete(self.sph_d,[0,idx_end_core],1)
def data(self, *args):
# Generate design matrix for grid
A_r, A_theta, A_phi = gt.design_SHA(self.r_grid/self.a, (90.0-self.lat)*self.rad, self.lon*self.rad, self.N_SH)
G = np.vstack((A_r, A_theta, A_phi))
# Load Gauss coefficients from data files
if np.logical_or(self.sim_type == "core", self.sim_type == "sat"):
Gauss_in = np.loadtxt('mikkel_tools/models_shc/Julien_Gauss_JFM_E-8_snap.dat')
elif self.sim_type == "surface":
Gauss_in = np.loadtxt('mikkel_tools/models_shc/Masterton_13470_total_it1_0.glm')
else:
Gauss_in = np.loadtxt(args[0], comments='%')
# Compute Gauss coefficients as vector
g = mt_util.gauss_vector(Gauss_in, self.N_SH, i_n = 2, i_m = 3)
# Generate field data
#data_dynamo = np.matrix(G)*np.matrix(g).T
data_dynamo = np.matmul(G,g.T)
data = np.array(data_dynamo[:len(A_r)]).ravel()
self.data = np.zeros((self.N_grid,))
self.data = data.copy()
self.r_grid_repeat = np.ones(self.N_grid,)*self.r_grid
# Target statistics
self.target_var = np.var(self.data)
self.target_mean = 0.0
def load_swarm(self, dataset, use_obs = False, target_var = None, target_var_factor = None):
# Load swarm samples
data_swarm = {"SW_A":np.loadtxt("swarm_data/SW_A_AprilMayJune18_dark_quiet_NEC.txt",comments="%"), "SW_B":np.loadtxt("swarm_data/SW_B_AprilMayJune18_dark_quiet_NEC.txt",comments="%"), "SW_C":np.loadtxt("swarm_data/SW_C_AprilMayJune18_dark_quiet_NEC.txt",comments="%")}
if dataset == "A":
data_swarm = {"obs":data_swarm["SW_A"][:,13], "radius":data_swarm["SW_A"][:,1], "theta":(data_swarm["SW_A"][:,2]), "phi":data_swarm["SW_A"][:,3], "N":data_swarm["SW_A"][:,13].shape[0]}
elif dataset == "B":
data_swarm = {"obs":data_swarm["SW_B"][:,13], "radius":data_swarm["SW_B"][:,1], "theta":(data_swarm["SW_B"][:,2]), "phi":data_swarm["SW_B"][:,3], "N":data_swarm["SW_B"][:,13].shape[0]}
elif dataset == "C":
data_swarm = {"obs":data_swarm["SW_C"][:,13], "radius":data_swarm["SW_C"][:,1], "theta":(data_swarm["SW_C"][:,2]), "phi":data_swarm["SW_C"][:,3], "N":data_swarm["SW_C"][:,13].shape[0]}
elif dataset == "ABC":
data_swarm = {"obs":np.hstack((data_swarm["SW_A"][:,13],data_swarm["SW_B"][:,13],data_swarm["SW_C"][:,13])),
"radius":np.hstack((data_swarm["SW_A"][:,1],data_swarm["SW_B"][:,1],data_swarm["SW_C"][:,1])),
"theta":np.hstack(((data_swarm["SW_A"][:,2]),(data_swarm["SW_B"][:,2]),(data_swarm["SW_C"][:,2]))),
"phi":np.hstack((data_swarm["SW_A"][:,3],data_swarm["SW_B"][:,3],data_swarm["SW_C"][:,3])),
"N":np.hstack((data_swarm["SW_A"][:,13],data_swarm["SW_B"][:,13],data_swarm["SW_C"][:,13])).shape[0]}
self.grid_theta = data_swarm["theta"]
self.grid_phi = data_swarm["phi"]
self.grid_radial = data_swarm["radius"]
self.grid_obs = data_swarm["obs"]
self.grid_N = data_swarm["N"]
if use_obs == True:
self.data = self.grid_obs
# Target statistics
if target_var_factor is not None:
self.target_var = target_var_factor*np.var(self.data)
elif target_var == None:
self.target_var = np.var(self.data)
else:
self.target_var = target_var
self.target_mean_true = np.mean(self.data)
self.target_mean = 0.0
def generate_map(self, grid_type = "glq", target_var = None, target_var_factor = None, *args):
# Load Gauss coefficients from data files
if np.logical_or(self.sim_type == "core", self.sim_type == "sat"):
Gauss_in = np.loadtxt('mikkel_tools/models_shc/Julien_Gauss_JFM_E-8_snap.dat')
elif self.sim_type == "core_alt":
import hdf5storage
#g_ens = np.genfromtxt("mikkel_tools/models_shc/gnm_midpath.dat").T*10**9
g_ens = -(hdf5storage.loadmat("mikkel_tools/models_shc/Gauss_Bsurf_2021.mat")["gnm"].T)[:,:].copy()
self.ensemble_B(g_ens, nmax = self.N_SH, r_at = self.r_cmb, grid_type = "glq")
self.m_ens = self.B_ensemble[:,0,:].copy()[:,200:]
var_ens = np.var(self.m_ens, axis=0)
idx_close_to_var = np.argwhere(np.logical_and(var_ens>0.9995*np.mean(var_ens), var_ens<1.0005*np.mean(var_ens)))
g = np.ravel(g_ens[:,idx_close_to_var[-1]])
N_SH_max = self.N_SH
self.ens_idx = int(idx_close_to_var[-1])
elif self.sim_type == "core_ens":
g_ens = np.genfromtxt("mikkel_tools/models_shc/gnm_midpath.dat").T*10**9
g_ens = g_ens[:mt_util.shc_vec_len(self.N_SH),:]
self.ensemble_B(g_ens, nmax = self.N_SH, r_at = self.r_cmb, grid_type = "glq")
self.m_ens = self.B_ensemble[:,0,:].copy()[:,200:]
var_ens = np.var(self.m_ens, axis=0)
idx_close_to_var = np.argwhere(np.logical_and(var_ens>0.9995*np.mean(var_ens), var_ens<1.0005*np.mean(var_ens)))
g = np.ravel(g_ens[:,idx_close_to_var[-1]])
N_SH_max = self.N_SH
self.ens_idx = int(idx_close_to_var[-1])
#self.g_ens = g_ens
elif self.sim_type == "lith_ens":
#g_ens = np.load("mikkel_tools/models_shc/lithosphere_g_in_rotated.npy")
g_ens = np.load("mikkel_tools/models_shc/LiP_ensemble_N500_n120_p05_vary_crust.npy")
#self.lith_ens_cut = 100
#g_ens = g_ens[:mt_util.shc_vec_len(self.N_SH),::self.lith_ens_cut]
g_ens = g_ens[:mt_util.shc_vec_len(self.N_SH),:]
#R = mt_util.lowe_shspec(self.N_SH, self.a, self.a, g_ens)
#g_ens = g_ens[:,np.mean(R,axis=0)>5]
self.ensemble_B(g_ens, nmax = self.N_SH, r_at = self.a, grid_type = "glq")
self.m_ens = self.B_ensemble[:,0,:].copy()
var_ens = np.var(self.m_ens, axis=0)
idx_close_to_var = np.argwhere(np.logical_and(var_ens>0.95*np.mean(var_ens), var_ens<1.05*np.mean(var_ens)))
g = np.ravel(g_ens[:,idx_close_to_var[-1]])
N_SH_max = self.N_SH
self.ens_idx = int(idx_close_to_var[-1])
elif self.sim_type == "surface":
Gauss_in = np.loadtxt('mikkel_tools/models_shc/Masterton_13470_total_it1_0.glm')
elif self.sim_type == "separation":
Gauss_in_core = np.loadtxt('mikkel_tools/models_shc/Julien_Gauss_JFM_E-8_snap.dat')
Gauss_in_lithos = np.loadtxt('mikkel_tools/models_shc/Masterton_13470_total_it1_0.glm')
g_c = mt_util.gauss_vector(Gauss_in_core, self.N_SH, i_n = 2, i_m = 3)
g_l = mt_util.gauss_vector(Gauss_in_lithos, self.N_SH_secondary, i_n = 2, i_m = 3)
g_zip = (g_c,g_l)
idx_zip_min = np.argmin((g_c.shape[0],g_l.shape[0]))
idx_zip_max = np.argmax((g_c.shape[0],g_l.shape[0]))
g = g_zip[idx_zip_max].copy()
g[:g_zip[idx_zip_min].shape[0]] += g_zip[idx_zip_min]
N_SH_max = np.max((self.N_SH, self.N_SH_secondary))
else:
Gauss_in = np.loadtxt(args[0], comments='%')
if np.logical_and.reduce((self.sim_type != "separation", self.sim_type != "core_ens", self.sim_type != "lith_ens", self.sim_type != "core_alt")):
# Compute Gauss coefficients as vector
g = mt_util.gauss_vector(Gauss_in, self.N_SH, i_n = 2, i_m = 3)
N_SH_max = self.N_SH
# Generate field
self.ensemble_B(g, nmax = N_SH_max, N_mf = 2, mf = True, nmf = False, r_at = self.r_grid, grid_type = grid_type)
self.data = self.B_ensemble[:,0]
del self.B_ensemble
"""
if grid_type == "glq":
self.data = self.B_ensemble_glq[:,0]
del self.B_ensemble_glq
elif grid_type == "even":
self.data = self.B_ensemble_even[:,0]
del self.B_ensemble_even
elif grid_type == "eqa":
self.data = self.B_ensemble_eqa[:,0]
del self.B_ensemble_eqa
elif grid_type == "swarm":
self.data = self.B_ensemble_swarm[:,0]
"""
if grid_type != "swarm":
self.r_grid_repeat = np.ones(self.N_grid,)*self.r_grid
# Target statistics
if target_var_factor is not None:
self.target_var = target_var_factor*np.var(self.data)
elif target_var == None:
self.target_var = np.var(self.data)
else:
self.target_var = target_var
self.target_mean_true = np.mean(self.data)
self.target_mean = 0.0
self.g_prior = g
def condtab(self, normsize = 1001, model_hist = False, table = 'rough', quantiles = None,
rangn_lim = 3.5, rangn_N = 501, rangv_lim = 2.0, rangv_N = 101, rangn_geomspace = False):
"""
Conditional distribution table
"""
import numpy as np
from scipy.stats import norm, laplace
from sklearn.preprocessing import QuantileTransformer
# Linearly spaced value array with start/end very close to zero/one
        start = 1e-16  # small positive value, to keep quantiles strictly inside (0, 1)
#start = 0.001
linspace = np.linspace(start,1-start,normsize)
# Possible model target histogram cdf/ccdf
if isinstance(model_hist, str) is False:
data_sorted = np.ravel(model_hist)
elif model_hist == True:
ag,bg = laplace.fit(self.data)
mod_data = np.random.laplace(ag,bg,size=100000)
#data_sorted = np.sort(mod_data)
data_sorted = mod_data
elif model_hist == "laplace":
rv = laplace()
self.data = laplace.rvs(loc = 0, scale=1, size=self.N_grid)
self.target_var = np.var(self.data)
self.target_mean = 0.0
#data_sorted = np.sort(self.data)
data_sorted = self.data
set_nmax = self.grid_nmax
C_cilm = pyshtools.expand.SHExpandGLQ(self.data.reshape(self.grid_nmax+1,2*self.grid_nmax+1), self.grid_w_shtools, self.grid_zero, [1, 1, set_nmax])
C_index = np.transpose(pyshtools.shio.SHCilmToCindex(C_cilm))
self.g_prior = mt_util.gauss_vector_zeroth(C_index, set_nmax, i_n = 0, i_m = 1)
self.g_cilm = C_cilm.copy()
elif model_hist == "ensemble":
data_sorted = np.ravel(self.m_ens)
data_sorted = data_sorted[0.5*np.max(np.abs(data_sorted))>np.abs(data_sorted)]
#data_sorted = np.delete(data_sorted, np.abs(data_sorted)>np.max(np.abs(data_sorted))*0.5)
else:
#data_sorted = np.sort(self.data)
data_sorted = self.data
if rangn_geomspace == False:
rangn = np.linspace(-rangn_lim,rangn_lim,rangn_N)
else:
rangn = np.vstack((np.geomspace(-rangn_lim,-start,int(rangn_N/2)).reshape(-1,1),np.zeros((1,1)),np.geomspace(start,rangn_lim,int(rangn_N/2)).reshape(-1,1)))
rangv = np.linspace(start,rangv_lim,rangv_N)
# Normscored local conditional distributions
# Initialize matrices
CQF_dist = np.zeros((len(rangn),len(rangv),len(linspace)))
CQF_mean = np.zeros((len(rangn),len(rangv)))
CQF_var = np.zeros((len(rangn),len(rangv)))
# Perform quantile transformation
if quantiles == None:
quantiles = int(0.1*len(data_sorted))
# QuantileTransformer setup
        qt = QuantileTransformer(n_quantiles=quantiles, random_state=None, output_distribution='normal', subsample=int(1e9))
qt.fit(data_sorted.reshape(-1,1))
#vrg = qt.transform(data_sorted.reshape(-1,1))
# Generate CQF distributions, means, and variances
print("")
for i in range(0,len(rangn)):
for j in range(0,len(rangv)):
#CQF_dist[i,j,:] = np.sort(qt.inverse_transform((norm.ppf(linspace,loc=rangn[i],scale=np.sqrt(rangv[j]))).reshape(-1,1)).ravel(),axis=0)
CQF_dist[i,j,:] = qt.inverse_transform((norm.ppf(linspace,loc=rangn[i],scale=np.sqrt(rangv[j]))).reshape(-1,1)).ravel()
CQF_mean[i,j] = np.mean(CQF_dist[i,j,:],axis=0,dtype=np.float64)
CQF_var[i,j] = np.var(CQF_dist[i,j,:],axis=0,ddof=1,dtype=np.float64)
#CQF_var[i,j] = np.var(CQF_dist[i,j,:],axis=0,ddof=0,dtype=np.float64)
self.CQF_dist = CQF_dist
self.CQF_mean = CQF_mean
self.CQF_var = CQF_var
self.rangv = rangv
self.rangn = rangn
self.condtab_normsize = normsize
self.condtab_model_hist = model_hist
self.condtab_table = table
#condtab = {"target variance":target_var, "target variance_dat":target_var_dat, "target mean":target_mean, "target mean_dat":target_mean_dat, "QF norm range":rangn, "QF var range":rangv, "CQF dist":CQF_dist, "CQF mean":CQF_mean, "CQF var":CQF_var, "target normscore":vrg, "compiler":setup["condtab_compiler"], "normsize":normsize, "start":start}
def find_sort_d(self, max_dist = 2000):
import numpy as np
sph_d_ravel = self.sph_d.ravel()
range_d = sph_d_ravel < max_dist
idx_range = np.array(np.where(range_d == True)).ravel()
val_range = sph_d_ravel[idx_range]
idx_sort_val_range = np.argsort(val_range)
self.sort_d = idx_range[idx_sort_val_range]
def data_variogram(self, max_dist = 11000):
"""
Function for calculating variogram from data
"""
import numpy as np
self.find_sort_d(max_dist = max_dist)
cloud_all = np.zeros([self.N_grid, self.N_grid])
for i in range(0,self.N_grid):
#cloud = (self.data[i]-self.data)**2
cloud = 0.5*(self.data[i]-self.data)**2
cloud_all[i,:] = cloud
self.cloud_sorted = cloud_all.ravel()[self.sort_d]
self.sph_d_sorted = self.sph_d.ravel()[self.sort_d]
def data_semivariogram(self, max_cloud, n_lags):
"""
Function for calculating semivariogram from data by taking the mean of
equidistant lags
"""
import numpy as np
pics = np.zeros(n_lags-1)
lags = np.zeros(n_lags-1)
#pic_zero = 0.5*np.mean(self.cloud_sorted[:self.N_grid])
pic_zero = np.mean(self.cloud_sorted[:self.N_grid])
lag_zero = np.mean(self.sph_d_sorted[:self.N_grid])
pics[0] = pic_zero
lags[0] = lag_zero
lags_geom = np.linspace(self.N_grid+2, max_cloud, n_lags, dtype=int)
for n in np.arange(0,n_lags-2):
#pic = 0.5*np.mean(self.cloud_sorted[lags_geom[n]:lags_geom[n+1]:1])
pic = np.mean(self.cloud_sorted[lags_geom[n]:lags_geom[n+1]:1])
pics[n+1] = pic
lag_c = np.mean(self.sph_d_sorted[lags_geom[n]:lags_geom[n+1]:1])
lags[n+1] = lag_c
self.lags = lags
self.pics = pics
def semivariogram_model(self, h, a, C0, C1, C2 = None, C3 = None, sv_mode = 'spherical'):
import numpy as np
if sv_mode == 'spherical':
'''
Spherical model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*( 1.5*hla/a - 0.5*(hla/a)**3 )
sv_model[len(hla):] = C0 + C1
sv_model = sv_model[hir]
elif sv_mode == 'dub_spherical':
            '''
            Double spherical model of the semivariogram
            '''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
ha2 = h>C3
sv_model[0:len(hla)] = C0 + C1*( 1.5*hla/a - 0.5*(hla/a)**3 ) + C2*( 1.5*hla/C3 - 0.5*(hla/C3)**3)
sv_model[len(hla):] = C0 + C1 + C2*( 1.5*hs[len(hla):]/C3 - 0.5*(hs[len(hla):]/C3)**3)
sv_model[ha2[hi]] = C0 + C1 + C2
sv_model = sv_model[hir]
elif sv_mode == 'gaussian':
'''
Gaussian model of the semivariogram
'''
sv_model = C0 + C1*(1-np.exp(-(3*np.ravel(h))**2/a**2))
elif sv_mode == 'exponential':
'''
Exponential model of the semivariogram
'''
import numpy as np
sv_model = C0 + C1*(1-np.exp(-3*h/a))
#sv_model = C0 + C1*(1-np.exp(-h/a))
#sv_model = C0 + C1*(np.exp(-h/a))
elif sv_mode == 'power':
'''
Power model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*hla**a
sv_model[len(hla):] = C0 + C1*np.array(hs[len(hla):])**a
sv_model = sv_model[hir]
elif sv_mode == 'hole':
'''
Hole model of the semivariogram
'''
sv_model = C0 + C1*(1-np.cos(h/a*np.pi))
elif sv_mode == 'hole_damp':
            '''
            Dampened hole model of the semivariogram
            '''
sv_model = C0 + C1*(1-np.exp(-3*h/C2)*np.cos(h/a*np.pi))
elif sv_mode == 'nested_hole_gau':
            '''
            Nested hole and Gaussian model of the semivariogram
            '''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*(1-np.cos(hla/a*np.pi)) + C2*(1-np.exp(-(3*hla)**2/a**2))
sv_model[len(hla):] = C0 + C1*(1-np.cos(np.array(hs[len(hla):])/a*np.pi)) + C2*(1-np.exp(-(3*np.array(hs[len(hla):]))**2/a**2))
sv_model = sv_model[hir]
elif sv_mode == 'nested_sph_gau':
'''
Nested spherical and gaussian model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*( 1.5*hla/a - 0.5*(hla/a)**3 ) + C2*(1-np.exp(-(3*hla)**2/a**2))
sv_model[len(hla):] = C0 + C1 + C2*(1-np.exp(-(3*np.array(hs[len(hla):]))**2/a**2))
sv_model = sv_model[hir]
elif sv_mode == 'nested_sph_exp':
'''
Nested spherical and exponential model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*( 1.5*hla/a - 0.5*(hla/a)**3 ) + C2*(1-np.exp(-(3*hla)/a))
sv_model[len(hla):] = C0 + C1 + C2*(1-np.exp(-(3*np.array(hs[len(hla):]))/a))
sv_model = sv_model[hir]
elif sv_mode == 'nested_exp_gau':
'''
Nested exponential and gaussian model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*(1-np.exp(-(3*hla)/a)) + C2*(1-np.exp(-(3*hla)**2/a**2))
sv_model[len(hla):] = C0 + C1*(1-np.exp(-(3*np.array(hs[len(hla):]))/a)) + C2*(1-np.exp(-(3*np.array(hs[len(hla):]))**2/a**2))
sv_model = sv_model[hir]
elif sv_mode == 'nested_sph_exp_gau':
            '''
            Nested spherical, exponential and Gaussian model of the semivariogram
            '''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*( 1.5*hla/a - 0.5*(hla/a)**3 ) + C2*(1-np.exp(-(3*hla)/a)) + C3*(1-np.exp(-(3*hla)**2/a**2))
sv_model[len(hla):] = C0 + C1 + C2*(1-np.exp(-(3*np.array(hs[len(hla):]))/a)) + C3*(1-np.exp(-(3*np.array(hs[len(hla):]))**2/a**2))
sv_model = sv_model[hir]
else:
print('Unknown model type')
return
return sv_model
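    # Behaviour of the 'spherical' variant, read directly from the code above:
    # at h = 0 the model returns the nugget C0, it grows as
    # C0 + C1*(1.5*h/a - 0.5*(h/a)**3) for h < a, and it stays at the sill
    # C0 + C1 for all h >= a. The nested variants add the extra C2/C3 terms
    # on top of this behaviour.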
def varioLUT(self, a, C0, C1, C2 = None, C3 = None, sv_model = 'spherical'):
import numpy as np
#from SDSSIM_utility import printProgressBar
'''
semi-variogram LUT generation
'''
#vario_lut = np.longdouble(np.zeros([self.N_grid, self.N_grid]))
vario_lut = np.double(np.zeros([self.N_grid, self.N_grid]))
for i in range(0,self.N_grid):
vario_lut[:,i] = self.semivariogram_model(self.sph_d[i,:], a, C0, C1, C2=C2, C3=C3, sv_mode=sv_model)
return vario_lut
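    # Note: varioLUT evaluates the fitted semivariogram model for every pair of
    # grid points, so self.sph_d (computed by make_grid with calc_sph_d=True)
    # must be available; the resulting dense N_grid x N_grid table is what
    # semivar() stores as self.sv_lut.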
def semivar(self, model_lags = 'all', model = 'nested_sph_exp_gau', max_dist = 11000, lag_length = 5,
nolut = False, bounds = True, zero_nugget = False, set_model = False, hit_target_var = False):
from math import inf
import numpy as np
from scipy.optimize import curve_fit
#from sklearn.preprocessing import normalize
self.sv_model_lags = model_lags
self.sv_max_dist = max_dist
self.sv_lag_length = lag_length
self.sv_zero_nugget = zero_nugget
self.data_variogram(max_dist=max_dist)
self.max_cloud = len(self.sort_d)
d_max = np.max(self.sph_d_sorted)
self.n_lags = int(d_max/lag_length) # lags from approx typical distance between core grid points
print("____semi-variogram setup___")
print("")
print("Number of data used: %d" %self.max_cloud)
print("Max data distance: %.3f km" %d_max)
print("Lag length chosen: %.1f km" %lag_length)
print("Number of lags: %d" %self.n_lags)
print("Number of modelling lags:",model_lags)
print("")
self.data_semivariogram(self.max_cloud, self.n_lags)
#print('Generating semi-variogram model')
#print("")
if model_lags == 'all':
lags_model = self.lags
pics_model = self.pics
else:
lags_model = self.lags[:model_lags]
pics_model = self.pics[:model_lags]
# Set model name for plotting and logicals for model selection
self.model_names = {'spherical':'spherical', 'dub_spherical':'double spherical', 'gaussian':'gaussian', 'exponential':'exponential', 'power':'power', 'hole':'hole', 'hole_damp':'dampened hole', 'nested_hole_gau':'hole+Gaussian', 'nested_sph_gau':'spherical+Gaussian', 'nested_sph_exp':'spherical+exponential', 'nested_exp_gau':'exponential+Gaussian', 'nested_sph_exp_gau':'spherical+exponential+Gaussian'}
self.model_select_simple = np.logical_or.reduce((model=='nested_sph_gau', model=='nested_sph_exp', model=='nested_exp_gau', model=='nested_hole_gau', model=='hole_damp'))
self.model_select_advanced = np.logical_or.reduce((model == 'nested_sph_exp_gau', model == 'dub_spherical'))
"""SET MODEL OR NOT"""
if set_model == False:
if model == 'spherical':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1):
return C0 + C1*(1.5*lags_model/a-0.5*(lags_model/a)**3)
elif hit_target_var == True:
def semivar_return(lags_model, a):
return (1.5*lags_model/a-0.5*(lags_model/a)**3)
else:
def semivar_return(lags_model, a, C1):
return C1*(1.5*lags_model/a-0.5*(lags_model/a)**3)
elif model == 'dub_spherical':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1, C2, C3):
return C0 + C1*(1.5*lags_model/a-0.5*(lags_model/a)**3) + C2*(1.5*lags_model/C3-0.5*(lags_model/C3)**3)
else:
def semivar_return(lags_model, a, C1, C2, C3):
return C1*(1.5*lags_model/a-0.5*(lags_model/a)**3) + C2*(1.5*lags_model/C3-0.5*(lags_model/C3)**3)
elif model == 'gaussian':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1):
return C0 + C1*(1-np.exp(-(3*lags_model)**2/a**2))
else:
def semivar_return(lags_model, a, C1):
return C1*(1-np.exp(-(3*lags_model)**2/a**2))
elif model == 'exponential':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1):
return C0 + C1*(1-np.exp(-3*lags_model/a))
#return C0 + C1*(1-np.exp(-lags_model/a))
#return C0 + C1*(np.exp(-lags_model/a))
elif hit_target_var == True:
def semivar_return(lags_model, a):
return (1-np.exp(-3*lags_model/a))
else:
def semivar_return(lags_model, a, C1):
return C1*(1-np.exp(-3*lags_model/a))
elif model == 'power':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1):
return C0 + C1*lags_model**a
else:
def semivar_return(lags_model, a, C1):
return C1*lags_model**a
elif model == 'hole':
def semivar_return(lags_model, a, C0, C1):
return C0 + C1*(1-np.cos(lags_model/a*np.pi))
elif model == 'hole_damp':
def semivar_return(lags_model, a, C0, C1, C2):
return C0 + C1*(1-np.exp(-3*lags_model/C2)*np.cos(lags_model/a*np.pi))
elif model == 'nested_hole_gau':
def semivar_return(lags_model, a, C0, C1, C2):
return C0 + C1*(1-np.cos(lags_model/a*np.pi)) + C2*(1-np.exp(-(3*lags_model)**2/a**2))
elif model == 'nested_sph_gau':
def semivar_return(lags_model, a, C0, C1, C2):
return C0 + C1*(1.5*lags_model/a-0.5*(lags_model/a)**3) + C2*(1-np.exp(-(3*lags_model)**2/a**2))
elif model == 'nested_sph_exp':
def semivar_return(lags_model, a, C0, C1, C2):
return C0 + C1*(1.5*lags_model/a-0.5*(lags_model/a)**3) + C2*(1-np.exp(-(3*lags_model)/a))
elif model == 'nested_exp_gau':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1, C2):
return C0 + C1*(1-np.exp(-(3*lags_model)/a)) + C2*(1-np.exp(-(3*lags_model)**2/a**2))
else:
def semivar_return(lags_model, a, C1, C2):
return C1*(1-np.exp(-(3*lags_model)/a)) + C2*(1-np.exp(-(3*lags_model)**2/a**2))
elif model == 'nested_sph_exp_gau':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1, C2, C3):
return C0 + C1*(1.5*lags_model/a-0.5*(lags_model/a)**3) + C2*(1-np.exp(-(3*lags_model)/a)) + C3*(1-np.exp(-(3*lags_model)**2/a**2))
else:
def semivar_return(lags_model, a, C1, C2, C3): # FOR ZERO NUGGET
return C1*(1.5*lags_model/a-0.5*(lags_model/a)**3) + C2*(1-np.exp(-(3*lags_model)/a)) + C3*(1-np.exp(-(3*lags_model)**2/a**2)) # FOR ZERO NUGGET
else:
print('wrong model type chosen')
if bounds == True:
"""Bounds and start values for curve fit"""
if model == 'power':
if zero_nugget == False:
p0 = [2.0,np.min(pics_model),np.max(pics_model)]
bounds = (0, [2.0, inf, inf])
else:
p0 = [2.0,np.max(pics_model)]
bounds = (0, [2.0, inf])
elif np.logical_or(model=='nested_sph_gau',model=='nested_sph_exp'):
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model),np.max(pics_model)]
bounds = (0, [lags_model[-1], inf, np.max(pics_model), np.max(pics_model)])
elif model=='nested_exp_gau':
if zero_nugget == False:
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model),np.max(pics_model)]
bounds = (0, [lags_model[-1], inf, np.max(pics_model), np.max(pics_model)])
else:
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.max(pics_model),np.max(pics_model)]
bounds = (0, [lags_model[-1], np.max(pics_model), np.max(pics_model)])
elif model=='nested_hole_gau':
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model),np.max(pics_model)]
bounds = (0, [lags_model[-1], inf, np.max(pics_model), np.max(pics_model)])
elif model=='hole_damp':
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model),5*np.max(lags_model)]
bounds = (0, [lags_model[-1], inf, np.max(pics_model), 10*np.max(lags_model)])
elif model == 'nested_sph_exp_gau':
if zero_nugget == False:
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model),np.max(pics_model),np.max(pics_model)]
bounds = (0, [lags_model[-1], inf, np.max(pics_model), np.max(pics_model),np.max(pics_model)])
else:
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model),np.max(pics_model)]
bounds = (0, [lags_model[-1], np.max(pics_model), np.max(pics_model),np.max(pics_model)])
elif model == 'dub_spherical':
if zero_nugget == False:
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model),np.max(pics_model),np.mean(lags_model[-int(len(lags_model)/2.0)])]
bounds = (0, [lags_model[-1], inf, np.max(pics_model), np.max(pics_model),lags_model[-1]])
else:
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model),np.mean(lags_model[-int(len(lags_model)/2.0)])]
bounds = (0, [lags_model[-1], np.max(pics_model), np.max(pics_model),lags_model[-1]])
else:
if zero_nugget == False:
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model)]
bounds = (0, [lags_model[-1], inf, np.max(pics_model)])
elif hit_target_var == True:
p0 = [np.max(pics_model)]
bounds = (0, [np.max(pics_model)])
else:
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.max(pics_model)]
bounds = (0, [lags_model[-1], np.max(pics_model)])
if hit_target_var == True:
pics_model_in = pics_model/self.target_var
popt, pcov = curve_fit(semivar_return, lags_model, pics_model_in, bounds=bounds, p0 = p0)
else:
popt, pcov = curve_fit(semivar_return, lags_model, pics_model, bounds=bounds, p0 = p0)
else:
popt, pcov = curve_fit(semivar_return, lags_model, pics_model, method='lm')
self.lags_model = lags_model
self.pics_model = pics_model
"""Calculate or define nugget"""
if zero_nugget == False:
C0 = popt[1]
C1 = popt[2]
C2 = None
C3 = None
if self.model_select_simple:
C2 = popt[3]
elif self.model_select_advanced:
C2 = popt[3]
C3 = popt[4]
elif hit_target_var == True:
C0 = 0.0 # FOR ZERO NUGGET
C1 = self.target_var
C2 = None
C3 = None
else:
C0 = 0.0 # FOR ZERO NUGGET
C1 = popt[1] # FOR ZERO NUGGET
C2 = None
C3 = None
if self.model_select_simple:
C2 = popt[2]
elif self.model_select_advanced:
C2 = popt[2] # FOR ZERO NUGGET
C3 = popt[3] # FOR ZERO NUGGET
"""Calculate or define correlation length"""
a = popt[0]
else:
a = set_model["a"]
C0 = set_model["C0"]
C1 = set_model["C1"]
C2 = set_model["C2"]
C3 = set_model["C3"]
"""Spherical model prediction"""
#lags_sv_curve = np.arange(0,int(np.round(lags[-1]))) # Very weird bug when using this for Gaussian model at lengths > 15K
self.lags_sv_curve = np.linspace(0, int(np.round(self.lags[-1])), len(self.lags))
if self.model_select_simple:
self.sv_curve = self.semivariogram_model(self.lags_sv_curve, a, C0, C1, C2 = C2, sv_mode = model)
elif self.model_select_advanced:
self.sv_curve = self.semivariogram_model(self.lags_sv_curve, a, C0, C1, C2 = C2, C3 = C3, sv_mode = model)
else:
self.sv_curve = self.semivariogram_model(self.lags_sv_curve, a, C0, C1, sv_mode = model)
print('Semi-variogram model determined, starting LUT computation')
print("")
if nolut == False:
if self.model_select_simple:
self.sv_lut = self.varioLUT(a, C0, C1, C2 = C2, sv_model = model)
elif self.model_select_advanced:
self.sv_lut = self.varioLUT(a, C0, C1, C2 = C2, C3 = C3, sv_model = model)
else:
self.sv_lut = self.varioLUT(a, C0, C1, sv_model = model)
# Set model in class
self.model = model
self.a_sv = a
self.C0 = C0
self.C1 = C1
self.C2 = C2
self.C3 = C3
def cov_model(self, r_at = None, N_cut = 200):
if r_at == None:
r_at = self.a
#tap_to = tap_to + 1 # One extra for overlap between R_add and R
#n_tap = self.N_SH + tap_to - 1 # And one less in the sum as a result
# g ensemble and parameters
if self.sim_type == "core_ens":
g_ens = np.genfromtxt("mikkel_tools/models_shc/gnm_midpath.dat").T*10**9
elif self.sim_type == "lith_ens":
#g_ens = np.load("mikkel_tools/models_shc/lithosphere_g_in_rotated.npy")
g_ens = np.load("mikkel_tools/models_shc/LiP_ensemble_N500_n120_p05_vary_crust.npy")
g_ens = g_ens[:mt_util.shc_vec_len(self.N_SH),:]
if self.sim_type == "core_ens":
g_cut = g_ens[:self.N_SH*(2+self.N_SH),N_cut:] # Truncate g
elif self.sim_type == "lith_ens":
#g_cut = g_ens[:self.N_SH*(2+self.N_SH),::self.lith_ens_cut]
g_cut = g_ens[:self.N_SH*(2+self.N_SH),:]
R = mt_util.lowe_shspec(self.N_SH, r_at, self.a, g_cut)
R = R[:,np.mean(R,axis=0)>5]
# Angular distance matrix
c_angdist = np.cos(mt_util.haversine(1, self.grid_phi.reshape(1,-1), 90-self.grid_theta.reshape(1,-1),
self.grid_phi.reshape(-1,1), 90-self.grid_theta.reshape(-1,1)))
c_unique, c_return = np.unique(np.ravel(c_angdist), return_inverse = True)
# Compute constants based on Chris' note eqn. 11
C_const = (np.arange(1,self.N_SH+1)+1)/(2*np.arange(1,self.N_SH+1)+1)
# Constant and R
CR = C_const.reshape(-1,1)*R
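        # In matrix form the covariance of B_r between two points separated by
        # angle theta is evaluated as
        #     C(theta) = sum_n (n+1)/(2n+1) * R_n * P_n(cos(theta))
        # averaged over the ensemble spectra R_n; the product Pn@CR further down
        # evaluates this sum for all unique cos(theta) values on the grid at once.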
# Generate matrix of all required Schmidt semi-normalized legendre polynomials
Pn = []
for cmu in c_unique:
Pn.append(pyshtools.legendre.PlSchmidt(self.N_SH,cmu)[1:].reshape(-1,))
Pn = np.array(Pn)[:,:]
#Pn = np.array(Pn).reshape((c_angdist.shape[0],c_angdist.shape[1],-1))
# Determine covariance model according to eqn. 11
C_Br_model = np.mean(Pn@CR,axis=1)[c_return].reshape((c_angdist.shape[0],c_angdist.shape[1]))
#if c_angdist.shape[0] <= 2000:
# C_Br = Pn@CR
# C_Br_model = np.mean(C_Br,axis=2)
#else:
# C_Br = np.zeros((self.grid_N, self.grid_N, 1))
# for i in np.arange(0,R.shape[1]):
# C_Br += Pn@CR[:,[i]]
# C_Br_model = C_Br[:,:,0]/R.shape[1]
# Positive definite covariance?
core_eigval = spl.eigh(C_Br_model, eigvals_only=True)
N_neg_eigval = len(core_eigval[core_eigval<=0])
print("All eigenvalues > 0:", np.all(core_eigval>=0))
print("Cov model is pos def:", mt_util.is_pos_def(C_Br_model))
if np.all(core_eigval>=0) == False:
print("Number of negative eigenvalues:",N_neg_eigval,"/",len(core_eigval))
# Save covariance model variable
self.C_ens = C_Br_model
def cov_model_taper(self, r_at = None, tap_to = 500, tap_exp_p1 = 5, tap_exp_p2 = 2,
tap_scale_start = 0, tap_scale_end = 24, plot_taper = False,
save_fig = False, save_string = "", save_dpi = 300, N_cut = 200):
if r_at == None:
r_at = self.a
tap_to = tap_to + 1 # One extra for overlap between R_add and R
n_tap = self.N_SH + tap_to - 1 # And one less in the sum as a result
# g ensemble and parameters
if self.sim_type == "core_ens":
g_ens = np.genfromtxt("mikkel_tools/models_shc/gnm_midpath.dat").T*10**9
elif self.sim_type == "core_alt":
import hdf5storage
g_ens = -(hdf5storage.loadmat("mikkel_tools/models_shc/Gauss_Bsurf_2021.mat")["gnm"].T)[:,:].copy()
elif self.sim_type == "lith_ens":
#g_ens = np.load("mikkel_tools/models_shc/lithosphere_g_in_rotated.npy")
g_ens = np.load("mikkel_tools/models_shc/LiP_ensemble_N500_n120_p05_vary_crust.npy")
g_ens = g_ens[:mt_util.shc_vec_len(self.N_SH),:]
if self.sim_type == "core_ens":
g_cut = g_ens[:self.N_SH*(2+self.N_SH),N_cut:] # Truncate g
elif self.sim_type == "core_alt":
g_cut = g_ens[:self.N_SH*(2+self.N_SH),N_cut:] # Truncate g
elif self.sim_type == "lith_ens":
#g_cut = g_ens[:self.N_SH*(2+self.N_SH),::self.lith_ens_cut]
g_cut = g_ens[:self.N_SH*(2+self.N_SH),:]
R = mt_util.lowe_shspec(self.N_SH, r_at, self.a, g_cut)
R = R[:,np.mean(R,axis=0)>5]
# Angular distance matrix
c_angdist = np.cos(mt_util.haversine(1, self.grid_phi.reshape(1,-1), 90-self.grid_theta.reshape(1,-1),
self.grid_phi.reshape(-1,1), 90-self.grid_theta.reshape(-1,1)))
c_unique, c_return = np.unique(np.ravel(c_angdist), return_inverse = True)
# Compute covariances based on Chris' note eqn. 11
C_const = (np.arange(1,n_tap+1)+1)/(2*np.arange(1,n_tap+1)+1)
# Generate matrix of all required Schmidt semi-normalized legendre polynomials
Pn = []
for cmu in c_unique:
Pn.append(pyshtools.legendre.PlSchmidt(n_tap,cmu)[1:].reshape(-1,))
Pn = np.array(Pn)[:,:]
#Pn = np.array(Pn).reshape((c_angdist.shape[0],c_angdist.shape[1],-1))
# Define taper with inverse powered exponential sum
lin_exp = np.linspace(tap_scale_start, tap_scale_end, tap_to)
tap_exp = (0.5*np.exp(-tap_exp_p1*lin_exp) + 0.5*np.exp(-tap_exp_p2*lin_exp)).reshape(-1,1)
# Take taper as factor on last spectra values and add to true prior spectra
R_add = R[-1,:]*tap_exp
R_tap = np.vstack((R,R_add[1:,:]))
# Constant and R
CR = C_const.reshape(-1,1)*R_tap
# Determine covariance model according to eqn. 11
C_Br_model = np.mean(Pn@CR,axis=1)[c_return].reshape((c_angdist.shape[0],c_angdist.shape[1]))
#if c_angdist.shape[0] <= 2000:
# C_Br = Pn@CR
# C_Br_model = np.mean(C_Br,axis=2)
#else:
# C_Br = np.zeros((self.grid_N, self.grid_N, 1))
# for i in np.arange(0,R.shape[1]):
# C_Br += Pn@CR[:,[i]]
# C_Br_model = C_Br[:,:,0]/R.shape[1]
# Positive definite covariance?
core_eigval = spl.eigh(C_Br_model, eigvals_only=True)
N_neg_eigval = len(core_eigval[core_eigval<=0])
print("All eigenvalues > 0:", np.all(core_eigval>=0))
print("Cov model is pos def:", mt_util.is_pos_def(C_Br_model))
if np.all(core_eigval>=0) == False:
print("Number of negative eigenvalues:",N_neg_eigval,"/",len(core_eigval))
# Save covariance model variable
self.C_ens_tap = C_Br_model
# Generate plot to show taper
if plot_taper == True:
lin_exp = np.linspace(tap_scale_start,tap_scale_end,10000)
lin_deg = np.linspace(1,tap_to,10000)
tap_exp = (0.5*np.exp(-tap_exp_p1*lin_exp) + 0.5*np.exp(-tap_exp_p2*lin_exp)).reshape(-1,1)
R_show = R[-1,:]*tap_exp
# Spectra
fig, axes = plt.subplots(1, 2, figsize=(10,4))
for i in np.arange(R_tap.shape[1]):
if i == 0:
axes[0].plot(np.arange(1,n_tap+1),R_tap[:,i],color=(0.6,0.6,0.6),label="Tapered ensemble")
axes[0].plot(lin_deg+self.N_SH-1,R_show[:,self.ens_idx],zorder = 10, label ="Taper function for highlight")
axes[0].plot(np.arange(1,n_tap+1)[:self.N_SH],R_tap[:self.N_SH,self.ens_idx],"o",zorder = 11, label = "Ensemble highlight truth")
axes[0].plot(np.arange(1,n_tap+1)[self.N_SH:],R_tap[self.N_SH:,self.ens_idx],"o",zorder = 11, label = "Ensemble highlight taper")
axes[1].plot(np.arange(1,n_tap+1),R_tap[:,i],color=(0.6,0.6,0.6),label="Tapered ensemble")
axes[1].plot(lin_deg+self.N_SH-1,R_show[:,self.ens_idx],zorder = 10, label ="Taper function for highlight")
axes[1].plot(np.arange(1,n_tap+1)[:self.N_SH],R_tap[:self.N_SH,self.ens_idx],"o",zorder = 11, label = "Ensemble highlight truth")
axes[1].plot(np.arange(1,n_tap+1)[self.N_SH:],R_tap[self.N_SH:,self.ens_idx],"o",zorder = 11, label = "Ensemble highlight taper")
else:
axes[0].plot(np.arange(1,n_tap+1),R_tap[:,i],color=(0.6,0.6,0.6))
axes[1].plot(np.arange(1,n_tap+1),R_tap[:,i],color=(0.6,0.6,0.6))
axes[0].set_xlim(self.N_SH-5,self.N_SH+10)
#axes[0].set_ylim(0,1.5*10**10)
axes[0].set_ylim(0,1.2*np.max(R_tap[self.N_SH,:]))
axes[1].set_xlim(0,tap_to/2)
#axes[1].set_ylim(0, 10**10)
axes[1].set_ylim(0, np.max(R_tap[self.N_SH,:]))
axes[0].legend(fontsize="small")
axes[1].legend(fontsize="small")
axes[0].set_ylabel("Power [$nT^2$]")
axes[0].set_xlabel("SH degree, n")
axes[1].set_ylabel("Power [$nT^2$]")
axes[1].set_xlabel("SH degree, n")
fig.suptitle('Taper function: $f_t = 0.5e^{{-{}n}} + 0.5e^{{-{}n}}$'.format(tap_exp_p1, tap_exp_p2), fontsize=10)
if save_fig == True:
fig.savefig('cov_taper_{}.pdf'.format(save_string), bbox_inches='tight', dpi = save_dpi)
plt.show()
def sv_m_DSS(self,N,N_sim,m_DSS,sort_d,n_lags,max_cloud):
"""
NEW Function for calculating semivariogram from simulations by taking the mean of
equidistant lags
"""
pics_m_DSS = np.zeros([n_lags-1,N_sim])
for j in np.arange(0,N_sim):
cloud_all = np.zeros([N,N])
for i in np.arange(0,N):
cloud = 0.5*(m_DSS[i,j]-m_DSS[:,j])**2
cloud_all[i,:] = cloud
pics_c = np.zeros(n_lags-1)
cloud_ravel = np.ravel(cloud_all)[sort_d]
pic_zero = np.mean(cloud_ravel[:N])
#pic_zero = 0.5*np.mean(cloud_ravel[:N])
pics_c[0] = pic_zero
lags_geom = np.linspace(N+2,max_cloud,n_lags,dtype=int)
for n in np.arange(0,n_lags-2):
#pic = 0.5*np.mean(cloud_ravel[lags_geom[n]:lags_geom[n+1]:1])
pic = np.mean(cloud_ravel[lags_geom[n]:lags_geom[n+1]:1])
pics_c[n+1] = pic
pics_m_DSS[:,j] = pics_c
self.pics_m_DSS = pics_m_DSS
def integrating_kernel(self, obs_obj, C_e_const = 2, print_ti_est_res = False, C_mm_supply = None):
G_mcal = mt_util.Gr_vec(self.r_grid, obs_obj.r_grid, self.lat, obs_obj.lat, self.lon, obs_obj.lon)
self.G = np.pi/(self.grid_nmax+0.5)*np.multiply(self.grid_w,G_mcal) # +0.5 for parity with SHTOOLS
C_e = np.diag(C_e_const**2*np.ones(obs_obj.grid_N,)) # No need to store C_e outside of here
if C_mm_supply is None:
self.C_mm_all = self.target_var-self.sv_lut
else:
self.C_mm_all = C_mm_supply
C_dm_all = self.G*self.C_mm_all
self.C_dd = C_dm_all*self.G.T + C_e
self.C_dm_all = C_dm_all.T
self.C_e_const = C_e_const
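        # The matrices assembled above follow the linear-Gaussian relations for
        # d = G m + e with Cov(m) = C_mm and Cov(e) = C_e:
        #     C_dm = G C_mm              (data-model cross-covariance)
        #     C_dd = G C_mm G^T + C_e    (data-data covariance)
        # so that a least-squares / kriging style estimate can later be formed as
        # m_est = C_md C_dd^{-1} d (see covmod_lsq_equiv further down).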
if print_ti_est_res == True:
# Compute forward and get residuals to synthetic observations
fwd_leg = self.G*self.data.reshape(-1,1)
fwd_leg_res = obs_obj.data - fwd_leg.reshape(-1,)
# RMSE
rmse_leg = np.sqrt(np.mean(np.power(fwd_leg_res,2)))
print("")
print("Gauss-Legendre RMSE:\t %0.12f" %rmse_leg)
plt.figure()
y,binEdges=np.histogram(fwd_leg_res,bins=200)
bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
plt.plot(bincenters,y,'C0',label="Gauss-Legendre")
plt.xlabel("Radial field residuals [nT]")
plt.ylabel("Count")
plt.legend()
plt.show()
def covmod_lsq_equiv(self, obs, C_mm, G, r_at, geomag_scale = True):
obs = obs.reshape(-1,1)
C_e = np.zeros((len(obs),len(obs)))
C_e[np.arange(1,len(obs)),np.arange(1,len(obs))] = self.C_e_const**2
S = C_e + [email protected][email protected]
T = np.linalg.inv(S)
self.m_equiv_lsq = [email protected]@T@obs
self.lsq_equiv_pred = [email protected]_equiv_lsq
self.lsq_equiv_res = obs - self.lsq_equiv_pred
self.g_equiv_lsq, _ = mt_util.sh_expand_glq(self.m_equiv_lsq, self.grid_nmax, self.grid_w_shtools, self.grid_zero, self.N_SH, geomag_scale = geomag_scale, geomag_r_at = r_at)
#C_cilm = pyshtools.expand.SHExpandGLQ(self.m_equiv_lsq.reshape(self.grid_nmax+1,2*self.grid_nmax+1), self.grid_w_shtools, self.grid_zero, [2, 1, self.grid_nmax])
#C_index = np.transpose(pyshtools.shio.SHCilmToCindex(C_cilm))
#if geomag_scale == True:
# nm_C = mt_util.array_nm(self.grid_nmax)
# C_corr_sh = 1/(nm_C[:,[0]]+1)*1/(self.a/r_at)**(nm_C[:,[0]]+2)
# C_index = C_index[1:,:]*C_corr_sh
#else:
# C_index = C_index[1:,:]
#C_vec = mt_util.gauss_vector(C_index, self.grid_nmax, i_n = 0, i_m = 1)
#self.g_lsq_equiv = C_vec
def covmod_lsq_equiv_sep(self, obs, semivar_c, semivar_l, target_var_c, target_var_l, G_d_sep,
title="", errorvar = 3**2):
d_0 = obs
G = G_d_sep.copy()
C_M_c = target_var_c - semivar_c
C_M_l = target_var_l - semivar_l
C_M = np.zeros((G.shape[1],G.shape[1]))
C_M[:C_M_c.shape[0],:C_M_c.shape[0]] = C_M_c
C_M[-C_M_l.shape[0]:,-C_M_l.shape[0]:] = C_M_l
C_D = np.zeros((len(d_0),len(d_0)))
C_D[np.arange(1,len(d_0)),np.arange(1,len(d_0))] = errorvar
S = C_D + G*C_M*G.T
T = np.linalg.inv(S)
m_equiv_lsq = C_M*G.T*T*d_0
lsq_equiv_pred = G_d_sep*m_equiv_lsq
lsq_equiv_res = obs - lsq_equiv_pred
return m_equiv_lsq, lsq_equiv_pred, lsq_equiv_res
def conditional_lookup(self, mu_k, sigma_sq_k, dm, dv, unit_d = False, scaling = True, return_idx = False):
from scipy.stats import norm
#conditional_lookup(self, cond_mean, cond_var, cond_dist, cond_dist_size, mu_k, sigma_sq_k, dm, dv):
#conditional_lookup(core.CQF_mean, core.CQF_var, core.CQF_dist, core.condtab_normsize, mu_k, sigma_sq_k, dm_c, dv_c)
#dist = np.power((condtab["CQF mean"]-mu_k)/dm,2)+np.power((condtab["CQF var"]-sigma_sq_k)/dv,2)
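        # Direct sequential simulation lookup: the pre-computed conditional
        # quantile functions (CQF) are indexed by their local mean and variance,
        # and the entry whose (mean, variance) pair lies closest to the kriging
        # mean mu_k and kriging variance sigma_sq_k is selected. A random draw
        # m_i from that local distribution is then (optionally) rescaled as
        #     m_k = (m_i - mean_i) * sqrt(sigma_sq_k) / std_i + mu_k
        # so the simulated value honours the kriging moments while still
        # reproducing the target histogram.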
if unit_d == True:
distance = np.power((self.CQF_mean-mu_k),2)+abs(self.CQF_var-sigma_sq_k)
else:
#distance = np.power((self.CQF_mean-mu_k)/dm,2)+abs(self.CQF_var-sigma_sq_k)/np.sqrt(dv)
distance = abs(self.CQF_mean-mu_k)/dm+abs(self.CQF_var-sigma_sq_k)/dv
nearest = np.unravel_index(np.argmin(distance),self.CQF_mean.shape)
idx_n = nearest[0]
idx_v = nearest[-1]
#if idx_v > 50:
# print(idx_v)
m_i = self.CQF_dist[idx_n,idx_v,np.random.randint(0,self.condtab_normsize,size=1)]
if scaling == True:
m_i_mean = self.CQF_mean[idx_n,idx_v]
m_i_std = np.sqrt(self.CQF_var[idx_n,idx_v],dtype=np.float64)
m_k = (m_i - m_i_mean)*np.sqrt(sigma_sq_k)/m_i_std+mu_k
else:
m_k = m_i
if return_idx == True:
return m_k, (idx_n, idx_v)
else:
return m_k
def run_sim(self, N_sim, N_m, C_mm_all, C_dd, C_dm_all, G, observations, training_image,
observations_direct = None, observations_direct_loc = None, observations_direct_e = None, use_sgs = False,
collect_all = False, scale_m_i = True, unit_d = False, sense_running_error = False, notebook_style = True, save_string = "test",
solve_cho = True, sim_stochastic = False, separation = False, separation_lim = None, separation_obj_1 = None,
separation_obj_2 = None):
import time
import random
import scipy as sp
"""
Input
N_sim:
N_m:
prior_data:
Output
"""
max_cov = np.max(C_mm_all)
#if observations_direct_e is not None:
# C_mm_all[np.arange(self.grid_N),np.arange(self.grid_N)] += observations_direct_e
"""Number of simulations"""
self.N_sim = N_sim
m_DSS = np.zeros((N_m, N_sim))
time_average = np.zeros((N_sim))
"""save variables"""
self.idx_nv_collect = list()
lagrange = list()
self.kriging_mv_collect = list()
rand_paths = list()
invshapes = list()
kriging_weights = list()
kriging_weights_rel_dat = list()
v_cond_vars = list()
lstsq_param = list()
C_dd_in = C_dd
""" Run sequential simulations"""
for realization in range(0,N_sim):
# Start timing
t0 = time.time()
random.seed(a=None)
np.random.seed()
# Initialize sequential simulation with random start
step_rnd_path = np.arange(N_m)
if observations_direct is not None:
step_rnd_path = np.delete(step_rnd_path, observations_direct_loc)
# Randomize index array to create random path
random.shuffle(step_rnd_path)
"""Run spherical direct sequential simulation"""
idx_v = np.empty([0,],dtype=int)
idx_n = np.empty([0,],dtype=int)
data_min = np.min(training_image)
data_max = np.max(training_image)
dm = data_max - data_min
dv = self.target_var
stepped_previously = np.empty([0,],dtype=int)
err_mag_sum = 0.0
len_stepped = 0
# Start random walk
for step in step_rnd_path:
C_mm_var = C_mm_all[step,step]
C_mm = np.empty([0,],dtype=np.longdouble)
C_dm = np.empty([0,],dtype=np.longdouble)
C_vm = np.empty([0,],dtype=np.longdouble)
c_mm = np.empty([0,1],dtype=np.longdouble)
c_dm = np.empty([0,1],dtype=np.longdouble)
c_vm = np.empty([0,1],dtype=np.longdouble)
mu_k = np.empty([0,],dtype=np.longdouble)
sigma_sq_k = np.empty([0,],dtype=np.longdouble)
idx_n = np.empty([0,],dtype=int)
idx_v = np.empty([0,],dtype=int)
m_i = np.empty([0,],dtype=np.longdouble)
m_k = None
err_mag_avg = np.empty([0,],dtype=np.longdouble)
kriging_weights = np.empty([0,],dtype=np.longdouble)
v_cond_var = np.empty([0,],dtype=np.longdouble)
#""" SORT METHOD """
#cov_walked = C_mm_all[step,stepped_previously]
if separation == True:
if step <= separation_lim:
sep_idx = 0
C_dd_in = C_dd[sep_idx]
else:
sep_idx = 1
C_dd_in = C_dd[sep_idx]
"""COV SETUP"""
# Set up m to m
c_mm = C_mm_all[step,stepped_previously].reshape(-1,1)
# Lookup all closest location semi-variances to each other (efficiently)
C_mm = (np.ravel(C_mm_all)[(stepped_previously + (stepped_previously * C_mm_all.shape[1]).reshape((-1,1))).ravel()]).reshape(stepped_previously.size, stepped_previously.size)
# Set up d to m, direct observations etc.
if observations_direct is not None:
if len_stepped == 0:
for step_direct in observations_direct_loc:
stepped_previously = np.append(stepped_previously, step_direct)
len_stepped += 1
m_DSS[stepped_previously,realization] = observations_direct
# Set up m to m
c_mm = C_mm_all[step,stepped_previously].reshape(-1,1)
# Lookup all closest location semi-variances to each other (efficiently)
C_mm = (np.ravel(C_mm_all)[(stepped_previously + (stepped_previously * C_mm_all.shape[1]).reshape((-1,1))).ravel()]).reshape(stepped_previously.size, stepped_previously.size)
if observations is not None:
c_dm = C_dm_all[step,:].reshape(-1,1)
C_dm = C_dm_all[stepped_previously,:]
c_vm = np.vstack((c_mm,c_dm))
C_vm = np.zeros((len(C_dd_in)+len(C_mm),len(C_dd_in)+len(C_mm)))
C_vm[-len(C_dd_in):,-len(C_dd_in):] = C_dd_in
C_vm[:len(C_mm),:len(C_mm)] = C_mm
C_vm[:len(C_mm),-len(C_dd_in):] = C_dm
C_vm[-len(C_dd_in):,:len(C_mm)] = C_dm.T
v_cond_var = m_DSS[stepped_previously,realization].reshape(-1,1)
v_cond_var = np.vstack((v_cond_var,observations.reshape(-1,1)))
else:
v_cond_var = m_DSS[stepped_previously,realization].reshape(-1,1)
c_vm = c_mm
C_vm = C_mm
elif sim_stochastic == False:
c_dm = C_dm_all[step,:].reshape(-1,1)
if len(stepped_previously) >= 1:
C_dm = C_dm_all[stepped_previously,:]
c_vm = np.vstack((c_mm,c_dm))
C_vm = np.zeros((len(C_dd_in)+len(C_mm),len(C_dd_in)+len(C_mm)))
C_vm[-len(C_dd_in):,-len(C_dd_in):] = C_dd_in
if len(stepped_previously) >= 1:
C_vm[:len(C_mm),:len(C_mm)] = C_mm
C_vm[:len(C_mm),-len(C_dd_in):] = C_dm
C_vm[-len(C_dd_in):,:len(C_mm)] = C_dm.T
v_cond_var = m_DSS[stepped_previously,realization].reshape(-1,1)
if len_stepped > 0:
v_cond_var = np.vstack((v_cond_var,observations.reshape(-1,1)))
else:
v_cond_var = observations.reshape(-1,1)
else:
if len_stepped > 1:
v_cond_var = m_DSS[stepped_previously,realization].reshape(-1,1)
c_vm = c_mm
C_vm = C_mm
else:
m_k = self.target_mean
if m_k == None:
"""SIMPLE KRIGING (SK)"""
#self.C_vm = C_vm
if solve_cho == True:
cho_lower = sp.linalg.cho_factor(C_vm)
kriging_weights = sp.linalg.cho_solve(cho_lower,c_vm)
else:
kriging_weights = np.linalg.solve(C_vm,c_vm)
#kriging_weights[kriging_weights<0.01] = 0.0
#sigma_sq_k = self.target_var - np.float(kriging_weights.reshape(1,-1)@c_vm)
sigma_sq_k = C_mm_var - np.float(kriging_weights.reshape(1,-1)@c_vm)
#sigma_sq_k = max_cov - np.float(kriging_weights.reshape(1,-1)@c_vm)
if sigma_sq_k < 0.0:
print("")
print("Negative kriging variance: %s" %sigma_sq_k)
print("")
kriging_weights[kriging_weights<0] = 0
#sigma_sq_k = self.target_var - np.float(kriging_weights.reshape(1,-1)@c_vm)
sigma_sq_k = C_mm_var - np.float(kriging_weights.reshape(1,-1)@c_vm)
#sigma_sq_k = max_cov - np.float(kriging_weights.reshape(1,-1)@c_vm)
mu_k = np.float(np.array(kriging_weights.reshape(1,-1)@(v_cond_var - self.target_mean) + self.target_mean))
if use_sgs == False:
if collect_all == True:
if separation == True:
dv = C_mm_var
if sep_idx == 0:
dm = np.max(training_image[:separation_lim]) - np.min(training_image[:separation_lim])
m_k, idx_nv = separation_obj_1.conditional_lookup(mu_k, sigma_sq_k, dm, dv, scaling = scale_m_i, unit_d = unit_d, return_idx = True)
else:
dm = np.max(training_image[separation_lim:]) - np.min(training_image[separation_lim:])
m_k, idx_nv = separation_obj_2.conditional_lookup(mu_k, sigma_sq_k, dm, dv, scaling = scale_m_i, unit_d = unit_d, return_idx = True)
else:
m_k, idx_nv = self.conditional_lookup(mu_k, sigma_sq_k, dm, dv, scaling = scale_m_i, unit_d = unit_d, return_idx = True)
self.idx_nv_collect.append(idx_nv)
self.kriging_mv_collect.append((mu_k, sigma_sq_k))
else:
if separation == True:
dv = C_mm_var
if sep_idx == 0:
dm = np.max(training_image[:separation_lim]) - np.min(training_image[:separation_lim])
m_k = separation_obj_1.conditional_lookup(mu_k, sigma_sq_k, dm, dv, scaling = scale_m_i, unit_d = unit_d, return_idx = False)
else:
dm = np.max(training_image[separation_lim:]) - np.min(training_image[separation_lim:])
m_k = separation_obj_2.conditional_lookup(mu_k, sigma_sq_k, dm, dv, scaling = scale_m_i, unit_d = unit_d, return_idx = False)
else:
m_k = self.conditional_lookup(mu_k, sigma_sq_k, dm, dv, scaling = scale_m_i, unit_d = unit_d, return_idx = False)
else:
m_k = mu_k
m_DSS[step,realization] = m_k
# Count locations walked for search neighborhood
stepped_previously = np.append(stepped_previously, step)
len_stepped += 1
# Get running sense of size of error compared to prior
if sense_running_error == True:
err_mag = np.log10(float(np.abs((training_image)[step]-m_k)))
err_mag_sum += err_mag
err_mag_avg = float(err_mag_sum/len_stepped)
mt_util.printProgressBar (len(stepped_previously), N_m, err_mag_avg, subject = ' realization nr. %d' % realization, notebook_style = notebook_style)
elif sense_running_error == None:
pass
else:
mt_util.printProgressBar (len(stepped_previously), N_m, subject = ' realization nr. %d' % realization, notebook_style = notebook_style)
# End timing
t1 = time.time()
# Plot statistics of realization
time_average[realization] = (t1-t0)
if time_average[realization] < 60:
print('Run time: %.3f' %(time_average[realization]), 'seconds', '')
elif time_average[realization] < 3600:
print('Run time: %.3f' %(time_average[realization]*60**(-1)), 'minutes', '')
else:
print('Run time: %.3f' %(time_average[realization]*60**(-2)), 'hours', '')
if np.sum(time_average[:(realization+1)])*60**(-1) > 60:
print('Total elapsed time: %.3f' %(np.sum(time_average[:(realization+1)])*60**(-2)), 'hours', '')
else:
print('Total elapsed time: %.3f' %(np.sum(time_average[:(realization+1)])*60**(-1)), 'minutes', '')
print('Variance: %.3f' %np.var(m_DSS[:,realization]))
print('Mean: %.3f' %np.mean(m_DSS[:,realization]))
print('Max: %.3f' %np.max(m_DSS[:,realization]))
print('Min: %.3f' %np.min(m_DSS[:,realization]))
print('Run nr.:', realization+1)
print('')
# Save realizations after each step
np.save("m_DSS_{}".format(save_string), m_DSS[:,:realization])
self.m_DSS = m_DSS
if G is not None:
self.m_DSS_pred = [email protected]_DSS
self.m_DSS_res = observations.reshape(-1,1) - self.m_DSS_pred
rmse_leg = np.sqrt(np.mean(np.power(self.m_DSS_res,2),axis=0))
print("")
print("Seqsim RMSE:\t {}".format(rmse_leg))
# color_rgb = (0.6,0.6,0.6)
# plt.figure()
# for i in np.arange(0,N_sim):
# y,binEdges=np.histogram(self.m_DSS_res[:,[i]],bins=200)
# bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
# if i == 0:
# plt.plot(bincenters,y,'-',color = color_rgb,label='Seqsim')
# else:
# plt.plot(bincenters,y,'-',color = color_rgb)
# plt.xlabel("Radial field residuals [nT]")
# plt.ylabel("Count")
# plt.show()
m_DSS_mean = np.mean(self.m_DSS,axis=-1).reshape(-1,1)@np.ones((1,N_sim))
if N_sim > 1:
self.C_DSS = 1/(N_sim-1)*(self.m_DSS-m_DSS_mean)@(self.m_DSS-m_DSS_mean).T
def realization_to_sh_coeff(self, r_at, set_nmax = None, set_norm = 1, geomag_scale = True):
#self.grid_glq(nmax = self.N_SH, r_at = r_at)
if set_nmax == None:
set_nmax = self.grid_nmax
self.g_spec = []
for i in np.arange(0,self.N_sim):
C_vec, _ = mt_util.sh_expand_glq(self.m_DSS[:,[i]], self.grid_nmax, self.grid_w_shtools, self.grid_zero, set_nmax, set_norm = set_norm, geomag_scale = geomag_scale, geomag_r_at = r_at)
self.g_spec.append(C_vec)
self.g_spec = np.array(self.g_spec).T
self.g_spec_mean = np.mean(self.g_spec,axis=1)
def run_sim_sep(self, N_sim):
import time
import random
kriging_method = "simple"
"""
Possible kriging_method(s):
- simple
"""
"""Number of simulations"""
m_DSS = np.zeros((core.grid_N + lithos.grid_N, N_sim))
time_average = np.zeros((N_sim))
"""save variables"""
idx_nv = list()
lagrange = list()
kriging_mv = list()
rand_paths = list()
invshapes = list()
kriging_weights = list()
kriging_weights_rel_dat = list()
v_cond_vars = list()
lstsq_param = list()
prior_data = np.hstack((core.data,lithos.data))
""" Run sequential simulations"""
for realization in range(0,N_sim):
# Start timing
t0 = time.time()
random.seed(a=None)
np.random.seed()
# Initialize sequential simulation with random start
step_rnd_path = np.arange(core.grid_N + lithos.grid_N)
# Randomize index array to create random path
random.shuffle(step_rnd_path)
"""Run spherical direct sequential simulation"""
idx_v = np.empty([0,],dtype=int)
idx_n = np.empty([0,],dtype=int)
data_min_c = np.min(core.data)
data_max_c = np.max(core.data)
dm_c = data_max_c - data_min_c
dv_c = core.target_var
data_min_l = np.min(lithos.data)
data_max_l = np.max(lithos.data)
dm_l = data_max_l - data_min_l
dv_l = lithos.target_var
stepped_previously = np.empty([0,],dtype=int)
err_mag_sum_c = 0.0
err_mag_sum_l = 0.0
len_walked_c = 0
len_walked_l = 0
len_stepped = 0
# Start random walk
for step in step_rnd_path:
step = step
C_mm = np.empty([0,],dtype=np.longdouble)
                C_dd = np.empty([0,],dtype=np.longdouble)
# future
from __future__ import annotations
# stdlib
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
# third party
from nacl.signing import VerifyKey
import numpy as np
from sympy.ntheory.factor_ import factorint
# relative
from ....core.adp.entity import DataSubjectGroup
from ....core.adp.entity import Entity
from ...adp.publish import publish
from ...adp.vm_private_scalar_manager import VirtualMachinePrivateScalarManager
from ...common.serde.serializable import serializable
from ...tensor.passthrough import PassthroughTensor # type: ignore
from ...tensor.passthrough import is_acceptable_simple_type # type: ignore
from ..broadcastable import is_broadcastable
from .adp_tensor import ADPTensor
SupportedChainType = Union[int, bool, float, np.ndarray, PassthroughTensor]
@serializable(recursive_serde=True)
class IntermediateGammaTensor(PassthroughTensor, ADPTensor):
"""Functionality for tracking differential privacy when individual values
are contributed to by multiple entities. IntermediateGammaTensor differs
from IniitalGammaTensor only in that InitialGammaTensor has additional
functionality in its constructor essential to when one initially begins
tracking metadata across mutliple entities, whereas IntermediateGammaTensor
has a simpler constructor for use when performing operations across one or
more IntermediateGammaTensor objects.
"""
__attr_allowlist__ = [
"term_tensor",
"coeff_tensor",
"bias_tensor",
"scalar_manager",
"child",
"unique_entities",
"n_entities",
]
def __init__(
self,
term_tensor: np.ndarray,
coeff_tensor: np.ndarray,
bias_tensor: np.ndarray,
# min_vals: np.ndarray,
# max_vals: np.ndarray,
scalar_manager: VirtualMachinePrivateScalarManager = VirtualMachinePrivateScalarManager(),
) -> None:
super().__init__(term_tensor)
# EXPLAIN A: if our clipped polynomial is y = clip(mx + b, min=min_vals, max=max_vals)
# EXPLAIN B: if self.child = 5x10
# EXPLAIN A: this is "x"
# EXPLAIN B: this is a 5x10x1
self.term_tensor = term_tensor
# EXPLAIN A: this is "m"
# EXPLAIN B: this is a 5x10x1
self.coeff_tensor = coeff_tensor
# EXPLAIN A: this is "b"
# EXPLAIN B: this is a 5x10
self.bias_tensor = bias_tensor
# EXPLAIN A: this is "min_vals"
# EXPLAIN B: this is a 5x10
# self.min_vals = min_vals
# EXPLAIN A: this is "max_vals"
# EXPLAIN B: this is a 5x10
# self.max_vals = max_vals
self.scalar_manager = scalar_manager
# Unique entities
self.unique_entities: set[Entity] = set()
self.n_entities = 0
for entity in set(self._entities_list()):
if isinstance(entity, Entity):
if entity not in self.unique_entities:
self.unique_entities.add(entity)
self.n_entities += 1
elif isinstance(entity, DataSubjectGroup):
for e in entity.entity_set:
if e not in self.unique_entities:
self.unique_entities.add(e)
self.n_entities += 1
else:
raise Exception(f"{type(entity)}")
@property
def flat_scalars(self) -> List[Any]:
flattened_terms = self.term_tensor.reshape(-1, self.term_tensor.shape[-1])
flattened_coeffs = self.coeff_tensor.reshape(-1, self.coeff_tensor.shape[-1])
flattened_bias = self.bias_tensor.reshape(-1)
# flattened_min_vals = self.min_vals.reshape(-1)
# flattened_max_vals = self.max_vals.reshape(-1)
scalars = list()
for i in range(len(flattened_terms)):
single_poly_terms = flattened_terms[i]
single_poly_coeffs = flattened_coeffs[i]
single_poly_bias = flattened_bias[i]
# single_poly_min_val = flattened_min_vals[i]
# single_poly_max_val = flattened_max_vals[i]
scalar = single_poly_bias
for j in range(len(single_poly_terms)):
term = single_poly_terms[j]
coeff = single_poly_coeffs[j]
for prime, n_times in factorint(term).items():
input_scalar = self.scalar_manager.prime2symbol[prime]
right = input_scalar * n_times * coeff
scalar = scalar + right
scalars.append(scalar)
return scalars
def _values(self) -> np.array:
"""WARNING: DO NOT MAKE THIS AVAILABLE TO THE POINTER!!!
DO NOT ADD THIS METHOD TO THE AST!!!
"""
return np.array(list(map(lambda x: x.value, self.flat_scalars))).reshape(
self.shape
)
def _max_values(self) -> np.array:
"""WARNING: DO NOT MAKE THIS AVAILABLE TO THE POINTER!!!
DO NOT ADD THIS METHOD TO THE AST!!!
"""
return np.array(list(map(lambda x: x.max_val, self.flat_scalars))).reshape(
self.shape
)
def _min_values(self) -> np.array:
"""WARNING: DO NOT MAKE THIS AVAILABLE TO THE POINTER!!!
DO NOT ADD THIS METHOD TO THE AST!!!
"""
return np.array(list(map(lambda x: x.min_val, self.flat_scalars))).reshape(
self.shape
)
def _entities_list(self) -> list:
"""WARNING: DO NOT MAKE THIS AVAILABLE TO THE POINTER!!!
DO NOT ADD THIS METHOD TO THE AST!!!
"""
output_entities = []
for flat_scalar in self.flat_scalars:
# TODO: This will fail if the nested entity is any deeper than 2 levels- i.e. [A, [A, [A, B]]]. Recursive?
combined_entities = DataSubjectGroup()
for row in flat_scalar.input_entities:
if isinstance(row, Entity) or isinstance(row, DataSubjectGroup):
combined_entities += row
elif isinstance(row, list):
for i in row:
if isinstance(i, Entity) or isinstance(i, DataSubjectGroup):
combined_entities += i
else:
raise Exception(f"Not implemented for i of type:{type(i)}")
else:
raise Exception(f"No plans for row type:{type(row)}")
output_entities.append(combined_entities)
return output_entities
def _entities(self) -> np.array:
"""WARNING: DO NOT MAKE THIS AVAILABLE TO THE POINTER!!!
DO NOT ADD THIS METHOD TO THE AST!!!
"""
output_entities = []
for flat_scalar in self.flat_scalars:
# TODO: This will fail if the nested entity is any deeper than 2 levels- i.e. [A, [A, [A, B]]]. Recursive?
combined_entities = DataSubjectGroup()
for row in flat_scalar.input_entities:
if isinstance(row, Entity) or isinstance(row, DataSubjectGroup):
combined_entities += row
elif isinstance(row, list):
for i in row:
if isinstance(i, Entity) or isinstance(i, DataSubjectGroup):
combined_entities += i
else:
raise Exception(f"Not implemented for i of type:{type(i)}")
else:
raise Exception(f"No plans for row type:{type(row)}")
output_entities.append(combined_entities)
return np.array(output_entities).reshape(self.shape)
def __gt__(self, other: Union[np.ndarray, IntermediateGammaTensor]) -> Any:
if isinstance(other, np.ndarray):
if is_broadcastable(self.shape, other.shape):
# relative
from .initial_gamma import InitialGammaTensor
vals = self._values()
tensor = InitialGammaTensor(
values=vals > other,
min_vals=np.zeros_like(vals),
max_vals=np.ones_like(vals),
entities=self._entities(),
)
else:
raise Exception(
f"Tensor shapes not compatible: {self.shape} and {other.shape}"
)
elif isinstance(other, IntermediateGammaTensor):
if is_broadcastable(self.shape, other.shape):
# relative
from .initial_gamma import InitialGammaTensor
self_vals = self._values()
other_vals = other._values()
tensor = InitialGammaTensor(
values=self_vals > other_vals,
min_vals=np.zeros_like(self_vals),
max_vals=np.ones_like(self_vals),
entities=self._entities() + other._entities(),
)
else:
raise Exception(
f"Tensor shapes not compatible: {self.shape} and {other.shape}"
)
else:
raise NotImplementedError
return tensor
def __lt__(self, other: Union[np.ndarray, IntermediateGammaTensor]) -> Any:
if isinstance(other, np.ndarray):
if is_broadcastable(self.shape, other.shape):
# relative
from .initial_gamma import InitialGammaTensor
vals = self._values()
tensor = InitialGammaTensor(
values=vals < other,
min_vals=np.zeros_like(vals),
max_vals=np.ones_like(vals),
entities=self._entities(),
)
else:
raise Exception(
f"Tensor shapes not compatible: {self.shape} and {other.shape}"
)
elif isinstance(other, IntermediateGammaTensor):
if is_broadcastable(self.shape, other.shape):
# relative
from .initial_gamma import InitialGammaTensor
self_vals = self._values()
other_vals = other._values()
tensor = InitialGammaTensor(
values=self_vals < other_vals,
min_vals=np.zeros_like(self_vals),
max_vals=np.ones_like(self_vals),
entities=self._entities() + other._entities(),
)
else:
raise Exception(
f"Tensor shapes not compatible: {self.shape} and {other.shape}"
)
else:
raise NotImplementedError
return tensor
def __eq__(self, other: Union[np.ndarray, IntermediateGammaTensor]) -> Any:
if isinstance(other, np.ndarray):
if is_broadcastable(self.shape, other.shape):
# relative
from .initial_gamma import InitialGammaTensor
vals = self._values()
tensor = InitialGammaTensor(
                    values=~(vals < other) & ~(vals > other),
                    min_vals=np.zeros_like(vals),
                    max_vals=np.ones_like(vals),
                    entities=self._entities(),
                )
            else:
                raise Exception(
                    f"Tensor shapes not compatible: {self.shape} and {other.shape}"
                )
        else:
            raise NotImplementedError
        return tensor
#!/usr/bin/env python
import numpy as np
from tqdm import tqdm
from astropy.constants import G as Ggrav
from .low_level_utils import fast_dist
G = Ggrav.to('kpc Msun**-1 km**2 s**-2').value
def all_profiles(bins, positions, velocities, masses, two_dimensional=False, zcut=None,
                 ages=None, pbar_msg='Making profiles', nexpr=False):
"""
assumes all positions and velocities are rotated in the same way, such
that the angular momentum axis aligns with the z axis
if two_dimensional == False, then compute:
M(<r), M(r), rho = M(r)/dV, Vcirc = sqrt(GM(<r)/r), mag J(r), mag J(<r), J_z(r), J_z(<r)
if two_dimensional == True, then compute:
M(<R), M(R), rho = M(R)/dA, Vcirc = mean(vx**2 + vy**2), mag J(R), mag J(<R), J_z(R), J_z(<R)
:bins : array-like : sorted (from small to large) bin edges to use
:positions : array-like : particle positions, rotated such that z aligns with angular momentum axis
:velocities : array-like : particle velocities, rotated in the same way as the positions
:masses : array-like : particle masses, in the same order as positions and velocities
:two_dimensional : bool : whether or not to do 2D profiles
:pbar_msg: str : what to print for the pbar (total mass and number of particles is appended)
    :nexpr : bool : whether or not to use numexpr to try to speed up the calculation
"""
if nexpr:
from numexpr import evaluate
print("Using numexpr for the masking and summing masses")
# work from outside in, throwing away particles as I no longer need them
assert positions.shape[0] == velocities.shape[0] == masses.shape[0]
m_of_r = np.empty(bins.size)
J_of_r = np.empty(bins.size)
Jz_of_r = np.empty(bins.size)
Jz_inside_r = np.empty(bins.size)
JinsideR = np.empty(bins.size)
specJinsideR = np.zeros(bins.size)
specJ_of_r = np.zeros(bins.size)
specJz_of_r = np.zeros(bins.size)
specJz_insideR = np.zeros(bins.size)
if ages is not None:
age_of_r = np.zeros(bins.size)
density = np.empty_like(m_of_r)
if two_dimensional:
vcirc = np.zeros(bins.size)
if two_dimensional:
x, y, z = positions.T
# distances are in the plane of the galaxy
distances = np.sqrt(x**2 + y**2)
else:
distances = fast_dist(positions) # center assumed to be at (0,0,0)
# throw away any particles beyond my last bin edge
msk = distances <= bins.max()
if two_dimensional:
msk = msk & (np.abs(z) <= zcut)
positions = positions[msk]
velocities = velocities[msk]
masses = masses[msk]
distances = distances[msk]
if ages is not None:
ages = ages[msk]
if two_dimensional:
x = x[msk]
y = y[msk]
# compute (angular) momenta for the particles:
# velocities should already have the halo at
pvec = (velocities.T*masses).T
# J = r cross p, and pos is assumed to have the halo at 0,0,0
Jvec = np.cross(positions, pvec)
del pvec
Jz = Jvec[:, 2]
if two_dimensional:
# calculate circular velocities:
# velocities in the plane of the disk
vx, vy = velocities[:, 0], velocities[:, 1]
V = np.vstack((vx, vy)).T # velocity vector in the plane of the disk
        R = np.vstack((x, y))
import sys
import os
import pickle
import cv2
import numpy as np
CAFFE_PYTHON_PATH = os.path.join(os.path.dirname(__file__), "../python")
sys.path.insert(0, CAFFE_PYTHON_PATH)
import caffe
from Dataset import GetDataset
from ACT_utils import *
from copy import deepcopy
K = 6
IMGSIZE = 300
MEAN = np.array([[[104, 117, 123]]], dtype=np.float32)
NFLOWS = 5
def extract_tubelets(dname, gpu=-1, redo=False):
"""Extract the tubelets for a given dataset
args:
- dname: dataset name (example: 'JHMDB')
- gpu (default -1): use gpu given in argument, or use cpu if -1
    - redo: whether or not to recompute already computed files
save a pickle file for each frame
the file contains a tuple (dets, dets_all)
- dets is a numpy array with 2+4*K columns containing the tubelets starting at this frame after per-class nms at 0.45 and thresholding the scores at 0.01
the columns are <label> <score> and then <x1> <y1> <x2> <y2> for each of the frame in the tubelet
- dets_all contains the tubelets obtained after a global nms at 0.7 and thresholding the scores at 0.01
      it is a numpy array with 4*K + L + 1 columns containing the coordinates of the tubelets and the scores for all labels
note: this version is inefficient: it is better to estimate the per-frame features once
"""
d = GetDataset(dname)
if gpu >= 0:
caffe.set_mode_gpu()
caffe.set_device(gpu)
model_dir = os.path.join(os.path.dirname(__file__), '../models/ACT-detector/', dname)
output_dir = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
# load the RGB network
rgb_proto = os.path.join(model_dir, "deploy_RGB.prototxt")
rgb_model = os.path.join(model_dir, "RGB.caffemodel")
net_rgb = caffe.Net(rgb_proto, caffe.TEST, weights=rgb_model)
# load the FLOW5 network
flo_proto = os.path.join(model_dir, "deploy_FLOW5.prototxt")
flo_model = os.path.join(model_dir, "FLOW5.caffemodel")
net_flo = caffe.Net(flo_proto, caffe.TEST, weights=flo_model)
vlist = d.test_vlist()
for iv, v in enumerate(vlist):
print("Processing video {:d}/{:d}: {:s}".format( iv+1, len(vlist), v))
h, w = d.resolution(v)
# network output is normalized between 0,1 ; so we will multiply it by the following array
resolution_array = np.array([w,h,w,h]*K, dtype=np.float32)
# now process each frame
for i in range(1, 1 + d.nframes(v) - K + 1):
outfile = os.path.join(output_dir, d.frame_format(v,i) + ".pkl")
# skip if already computed
if os.path.isfile(outfile) and not redo:
continue
# read the frames for the forward
kwargs_rgb = {}
kwargs_flo = {}
for j in range(K):
im = cv2.imread(d.imfile(v, i + j))
if im is None:
print("Image {:s} does not exist".format(d.imfile(v, i+j)))
return
imscale = cv2.resize(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR)
kwargs_rgb['data_stream' + str(j)] = np.transpose(imscale-MEAN, (2, 0, 1))[None, :, :, :]
imf = [cv2.imread(d.flowfile(v, min(d.nframes(v), i + j + iflow))) for iflow in range(NFLOWS)]
                if any(im_flow is None for im_flow in imf):
print("Flow image {:s} does not exist".format(d.flowfile(v, i+j)))
return
imscalef = [cv2.resize(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR) for im in imf]
timscale = [np.transpose(im-MEAN, (2, 0, 1))[None, :, :, :] for im in imscalef]
kwargs_flo['data_stream' + str(j) + 'flow'] = np.concatenate(timscale, axis=1)
# compute rgb and flow scores
# two forward passes: one for the rgb and one for the flow
net_rgb.forward(end="mbox_conf_flatten", **kwargs_rgb) # forward of rgb with confidence and regression
net_flo.forward(end="mbox_conf_flatten", **kwargs_flo) # forward of flow5 with confidence and regression
# compute late fusion of rgb and flow scores (keep regression from rgb)
# use net_rgb for standard detections, net_flo for having all boxes
scores = 0.5 * (net_rgb.blobs['mbox_conf_flatten'].data + net_flo.blobs['mbox_conf_flatten'].data)
net_rgb.blobs['mbox_conf_flatten'].data[...] = scores
net_flo.blobs['mbox_conf_flatten'].data[...] = scores
net_flo.blobs['mbox_loc'].data[...] = net_rgb.blobs['mbox_loc'].data
# two forward passes, only for the last layer
            # dets is the detections after per-class NMS and thresholding (standard)
# dets_all contains all the scores and regressions for all tubelets
dets = net_rgb.forward(start='detection_out')['detection_out'][0, 0, :, 1:]
dets_all = net_flo.forward(start='detection_out_full')['detection_out_full'][0, 0, :, 1:]
# parse detections with per-class NMS
if dets.shape[0] == 1 and np.all(dets == -1):
dets = np.empty((0, dets.shape[1]), dtype=np.float32)
dets[:, 2:] *= resolution_array # network output was normalized in [0..1]
dets[:, 0] -= 1 # label 0 was background, come back to label in [0..nlabels-1]
dets[:, 2::2] = np.maximum(0, np.minimum(w, dets[:, 2::2]))
dets[:, 3::2] = np.maximum(0, np.minimum(h, dets[:, 3::2]))
# parse detections with global NMS at 0.7 (top 300)
# coordinates were normalized in [0..1]
dets_all[:, 0:4*K] *= resolution_array
dets_all[:, 0:4*K:2] = np.maximum(0, np.minimum(w, dets_all[:, 0:4*K:2]))
dets_all[:, 1:4*K:2] = np.maximum(0, np.minimum(h, dets_all[:, 1:4*K:2]))
idx = nms_tubelets(np.concatenate((dets_all[:, :4*K], np.max(dets_all[:, 4*K+1:], axis=1)[:, None]), axis=1), 0.7, 300)
dets_all = dets_all[idx, :]
# save file
if not os.path.isdir(os.path.dirname(outfile)):
os.system('mkdir -p ' + os.path.dirname(outfile))
with open(outfile, 'wb') as fid:
pickle.dump((dets, dets_all), fid)
def load_frame_detections(d, vlist, dirname, nms):
if isinstance(d, str):
d = GetDataset(d)
alldets = [] # list of numpy array with <video_index> <frame_index> <ilabel> <score> <x1> <y1> <x2> <y2>
for iv, v in enumerate(vlist):
h,w = d.resolution(v)
# aggregate the results for each frame
vdets = {i: np.empty((0,6), dtype=np.float32) for i in range(1, 1 + d.nframes(v))} # x1, y1, x2, y2, score, ilabel
# load results for each starting frame
for i in range(1, 1 + d.nframes(v) - K + 1):
resname = os.path.join(dirname, d.frame_format(v,i) + '.pkl')
if not os.path.isfile(resname):
print("ERROR: Missing extracted tubelets "+resname)
sys.exit()
with open(resname, 'rb') as fid:
dets, _ = pickle.load(fid)
if dets.size == 0:
continue
for k in range(K):
vdets[i+k] = np.concatenate( (vdets[i+k],dets[:,np.array([2+4*k,3+4*k,4+4*k,5+4*k,1,0])] ), axis=0)
# Perform NMS in each frame
for i in vdets:
idx = np.empty((0,), dtype=np.int32)
for ilabel in range(d.nlabels):
a = np.where(vdets[i][:,5] == ilabel)[0]
if a.size == 0:
continue
idx = np.concatenate((idx, a[nms2d(vdets[i][vdets[i][:, 5] == ilabel, :5], nms)]), axis=0)
if idx.size == 0:
continue
alldets.append(np.concatenate((iv * np.ones((idx.size, 1), dtype=np.float32), i * np.ones((idx.size, 1), dtype=np.float32), vdets[i][idx, :][:, np.array([5, 4, 0, 1, 2, 3], dtype=np.int32)]), axis=1))
return np.concatenate(alldets, axis=0)
def frameAP(dname, th=0.5, redo=False):
d = GetDataset(dname)
dirname = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
eval_file = os.path.join(dirname, "frameAP{:g}.pkl".format(th))
if os.path.isfile(eval_file) and not redo:
with open(eval_file, 'rb') as fid:
res = pickle.load(fid)
else:
vlist = d.test_vlist()
# load per-frame detections
alldets = load_frame_detections(d, vlist, dirname, 0.3)
res = {}
# compute AP for each class
for ilabel,label in enumerate(d.labels):
# detections of this class
detections = alldets[alldets[:, 2] == ilabel, :]
# load ground-truth of this class
gt = {}
for iv, v in enumerate(vlist):
tubes = d.gttubes(v)
if not ilabel in tubes:
continue
for tube in tubes[ilabel]:
for i in range(tube.shape[0]):
k = (iv, int(tube[i, 0]))
if not k in gt:
gt[k] = []
gt[k].append(tube[i, 1:5].tolist())
for k in gt:
gt[k] = np.array( gt[k] )
# pr will be an array containing precision-recall values
pr = np.empty((detections.shape[0] + 1, 2), dtype=np.float32)# precision,recall
pr[0, 0] = 1.0
pr[0, 1] = 0.0
fn = sum([g.shape[0] for g in gt.values()]) # false negatives
fp = 0 # false positives
tp = 0 # true positives
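            # Greedy matching in descending score order: a detection counts as a
            # true positive if its IoU with some still-unmatched ground-truth box
            # in the same (video, frame) is >= th; that box is then removed so it
            # cannot be matched twice. Precision and recall are updated after
            # every detection, producing the PR curve that pr_to_ap turns into AP.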
for i, j in enumerate(np.argsort(-detections[:,3])):
k = (int(detections[j,0]), int(detections[j,1]))
box = detections[j, 4:8]
ispositive = False
if k in gt:
ious = iou2d(gt[k], box)
amax = np.argmax(ious)
if ious[amax] >= th:
ispositive = True
gt[k] = np.delete(gt[k], amax, 0)
if gt[k].size == 0:
del gt[k]
if ispositive:
tp += 1
fn -= 1
else:
fp += 1
pr[i+1, 0] = float(tp) / float(tp + fp)
pr[i+1, 1] = float(tp) / float(tp + fn)
res[label] = pr
# save results
with open(eval_file, 'wb') as fid:
pickle.dump(res, fid)
# display results
ap = 100*np.array([pr_to_ap(res[label]) for label in d.labels])
print("frameAP")
for il, _ in enumerate(d.labels):
print("{:20s} {:8.2f}".format('', ap[il]))
print("{:20s} {:8.2f}".format("mAP", np.mean(ap)))
print("")
def frameAP_error(dname, th=0.5, redo=False):
d = GetDataset(dname)
dirname = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
eval_file = os.path.join(dirname, "frameAP{:g}ErrorAnalysis.pkl".format(th))
if os.path.isfile(eval_file) and not redo:
with open(eval_file, 'rb') as fid:
res = pickle.load(fid)
else:
vlist = d.test_vlist()
# load per-frame detections
alldets = load_frame_detections(d, vlist, dirname, 0.3)
res = {}
# compute AP for each class
for ilabel,label in enumerate(d.labels):
# detections of this class
detections = alldets[alldets[:, 2] == ilabel, :]
gt = {}
othergt = {}
labellist = {}
for iv, v in enumerate(vlist):
tubes = d.gttubes(v)
labellist[v] = tubes.keys()
for il in tubes:
for tube in tubes[il]:
for i in range(tube.shape[0]):
k = (iv, int(tube[i, 0]))
if il == ilabel:
if k not in gt:
gt[k] = []
gt[k].append(tube[i, 1:5].tolist())
else:
if k not in othergt:
othergt[k] = []
othergt[k].append(tube[i, 1:5].tolist())
for k in gt:
gt[k] = np.array(gt[k])
for k in othergt:
othergt[k] = np.array(othergt[k])
dupgt = deepcopy(gt)
# pr will be an array containing precision-recall values and 4 types of errors:
# localization, classification, timing, others
pr = np.empty((detections.shape[0] + 1, 6), dtype=np.float32)# precision, recall
pr[0, 0] = 1.0
pr[0, 1:] = 0.0
fn = sum([g.shape[0] for g in gt.values()]) # false negatives
fp = 0 # false positives
tp = 0 # true positives
EL = 0 # localization errors
EC = 0 # classification error: overlap >=0.5 with an another object
EO = 0 # other errors
ET = 0 # timing error: the video contains the action but not at this frame
for i, j in enumerate(np.argsort(-detections[:,3])):
k = (int(detections[j, 0]), int(detections[j,1]))
box = detections[j, 4:8]
ispositive = False
if k in dupgt:
if k in gt:
ious = iou2d(gt[k], box)
amax = np.argmax(ious)
if k in gt and ious[amax] >= th:
ispositive = True
                        gt[k] = np.delete(gt[k], amax, 0)
#!/usr/bin/env python
"""
Curves, tracks, skeletons connecting surface mesh vertices.
Authors:
- <NAME>, 2012-2016 (<EMAIL>) http://binarybottle.com
- <NAME>, 2012 (<EMAIL>)
Copyright 2016, Mindboggle team (http://mindboggle.info), Apache v2.0 License
"""
def connect_points_erosion(S, neighbor_lists, outer_anchors, inner_anchors=[],
values=[], erode_ratio=0.1, erode_min_size=10,
save_steps=[], save_vtk='', background_value=-1,
verbose=False):
"""
Connect mesh vertices with a skeleton of 1-vertex-thick curves by erosion.
This algorithm iteratively removes simple topological points and endpoints,
optionally in order of lowest to highest values.
Parameters
----------
S : numpy array of integers
values for all vertices (disregard background values)
outer_anchors : list of integers
indices of vertices to connect
inner_anchors : list of integers
more vertices to connect; they are removed if they result in endpoints
neighbor_lists : list of lists of integers
each list contains indices to neighboring vertices for each vertex
values : numpy array of floats
values for S elements, to optionally remove points
in order of lowest to highest values
erode_ratio : float
fraction of indices to test for removal at each iteration (if values)
erode_min_size : integer
minimum number of vertices when considering erode_ratio
save_steps : list of integers (optional)
iterations at which to save incremental VTK file
save_vtk : string
name of VTK file to transfer incremental values (if save_steps)
background_value : integer or float
background value
verbose : bool
print statements?
Returns
-------
skeleton : list of integers
indices to vertices of skeleton
Examples
--------
>>> # Extract a skeleton to connect endpoints in a fold:
>>> import numpy as np
>>> from mindboggle.guts.paths import connect_points_erosion
>>> from mindboggle.guts.paths import find_outer_endpoints
>>> from mindboggle.mio.vtks import read_scalars, read_vtk
>>> from mindboggle.guts.compute import median_abs_dev
>>> from mindboggle.guts.paths import find_max_values
>>> from mindboggle.guts.mesh import find_neighbors_from_file
>>> from mindboggle.mio.fetch_data import prep_tests
>>> urls, fetch_data = prep_tests()
>>> curv_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
>>> depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
>>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
>>> points, f1,f2,f3, curvs, f4,f5,f6 = read_vtk(curv_file, True,True)
>>> depths, name = read_scalars(depth_file, True, True)
>>> folds, name = read_scalars(folds_file, True, True)
>>> values = depths * curvs
>>> [np.float("{0:.{1}f}".format(x, 5)) for x in values[0:5]]
[-0.11778, -0.35642, -0.80759, -0.25654, -0.04411]
>>> neighbor_lists = find_neighbors_from_file(curv_file)
>>> background_value = -1
>>> # Limit number of folds to speed up the test:
>>> limit_folds = True
>>> if limit_folds:
... fold_numbers = [4] #[4, 6]
... indices = [i for i,x in enumerate(folds) if x in fold_numbers]
... i0 = [i for i,x in enumerate(folds) if x not in fold_numbers]
... folds[i0] = background_value
... else:
... indices = range(len(values))
>>> # Outer anchors:
>>> min_separation = 10
>>> verbose = False
>>> outer_anchors, tracks = find_outer_endpoints(indices, neighbor_lists,
... values, depths, min_separation,
... background_value, verbose)
>>> outer_anchors[0:10]
[50324, 66986, 75661]
>>> # Inner anchors:
>>> values0 = [x for x in values if x > 0]
>>> thr = np.median(values0) + 2 * median_abs_dev(values0)
>>> inner_anchors = find_max_values(points, values, min_separation, thr)
>>> inner_anchors[0:10]
[61455, 41761, 67978, 72621, 78546, 40675, 73745, 98736, 125536, 119813]
>>> erode_ratio = 0.10
>>> erode_min_size = 10
>>> save_steps = [] #list(range(0,500,50))
>>> save_vtk = depth_file
>>> S = np.copy(folds)
>>> skeleton = connect_points_erosion(S, neighbor_lists,
... outer_anchors, inner_anchors, values, erode_ratio, erode_min_size,
... save_steps, save_vtk, background_value, verbose)
>>> skeleton[0:10]
[50324, 50333, 50339, 51552, 51560, 52707, 52716, 52724, 52725, 53893]
Write out vtk file and view (skip test):
>>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
>>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
>>> folds[skeleton] = 10 # doctest: +SKIP
>>> folds[outer_anchors] = 15 # doctest: +SKIP
>>> rewrite_scalars(depth_file, 'connect_points_erosion.vtk',
... folds, 'skeleton', folds, background_value) # doctest: +SKIP
>>> plot_surfaces('connect_points_erosion.vtk') # doctest: +SKIP
"""
import numpy as np
from mindboggle.guts.mesh import topo_test, extract_edge, find_endpoints
from mindboggle.guts.segment import segment_regions
# Make sure arguments are numpy arrays:
if not isinstance(S, np.ndarray):
S = np.array(S)
erode_by_value = False
if len(values) and 0 <= erode_ratio < 1:
erode_by_value = True
if not isinstance(values, np.ndarray):
values = np.array(values)
keep = outer_anchors[:]
keep.extend(inner_anchors)
remove_endpoints = True
if save_steps:
from mindboggle.mio.vtks import rewrite_scalars
S0 = S.copy()
# ------------------------------------------------------------------------
# Iteratively remove simple points:
# ------------------------------------------------------------------------
if verbose:
print(' Remove up to {0} of edge vertices per iteration'.
format(erode_ratio))
complex = []
count = -1
exist_simple = True
while exist_simple:
exist_simple = False
if verbose or save_steps:
count += 1
# --------------------------------------------------------------------
# Only consider updating vertices that are on the edge of the
# region and are not among the indices to keep or known simple points:
# --------------------------------------------------------------------
        indices = np.where(S != background_value)
"""
Python DLIS file reader
<NAME> - <EMAIL>
July 2016
PREFACE:
American Petroleum Institute (API) Standard RP66 Version 1 (RP66 V1),
published in May 1991, specified a format for digital well log data,
called Digital Log Interchange Standard (DLIS).
RP66 V1 publication was under jurisdiction of API until June 1998,
when Petrotechnical Open Software Corporation (POSC) accepted its
stewardship.
In November 2006, POSC re-branded itself as Energistics.
PURPOSE:
This software was created to read DLIS files.
At this time only DLIS Version 1 (RP66 V1) is supported.
SOURCES:
This code was developed based on Energistics RP66 V1 standard:
http://w3.energistics.org/RP66/V1/Toc/main.html
USAGE:
(1) To read a DLIS file into memory, just use:
dlis = DLISFile() (mandatory)
dlis.read(filename) (mandatory)
    (2) An example of usage (just an example) can be shown with:
dlis.print_logical_file() (optional)
The function above is just a taste of this DLIS reader, it
produces some result like this:
Logical File: 0
1&0&B61441
#1
0&0&INDEX : 1640.375 m
1&0&DT : -999.25 us/ft
1&0&GR : 51.84400177 gAPI
1&0&ILD : 0.0189999993891 ohm.m
1&0&CALI : 12.3409996033 in
1&0&RHOB : 4.29400014877 g/cm3
1&0&NPHI : 0.675999999046 m3/m3
#2
0&0&INDEX : 1640.5 m
1&0&DT : -999.25 us/ft
1&0&GR : 55.9160003662 gAPI
1&0&ILD : 0.0189999993891 ohm.m
1&0&CALI : 12.3509998322 in
1&0&RHOB : 4.29400014877 g/cm3
1&0&NPHI : 0.65030002594 m3/m3
...
#n
(3) For a real usage, use these data structures:
- dlis.data: a list of Logical Wells data. Each Logical Well data
is an OrderedDict containing object name as key and
another OrderedDict as object values, that values are
a OrderedDict too containing data index as key
(e.g #1, #2, #n) and a list of values as a dict value.
This list of values are the real log data values.
The structure is illustrated below.
-> Logical Well Data 1
-> Logical Well Data 2
--> (object_name_1, object_dict_1), where object_dict_1 is:
---> (data_index_1, list_of_values_1)
---> (data_index_2, list_of_values_2)
---> (data_index_n, list_of_values_n)
--> (object_name_2, object_dict_2)
--> (object_name_n, object_dict_n)
-> Logical Well Data n
- dlis.data_props: a list of Logical Wells properties. Each Logical
Well properties is an OrderedDict containing object
name as key and another OrderedDict as values
**** (????) - dlis.SUL: a list of well parameters (header parameters).
- dlis.file_header = None
- dlis.origin = None
- dlis.parameter = None
- dlis.frame = None
- dlis.channel = None
"""
import os
import struct
from collections import OrderedDict
import numpy as np
import app
#from multiprocessing import Process, Queue
#import threading
#import utils
def _get_value(data, format_, big_endian=True):
big = ''
if big_endian:
big = '>'
format_ = big + format_
try:
# print()
# print(data, type(data))
n = struct.unpack(format_, data)
# print(n)
# print(n[0], type(n[0]))
# print()
return n[0]
except Exception:
raise
def get_from_list(data_list, start_offset, code, size=None):
code_spec = RepresentationCodes.get_code(code)
# print()
# print('\nget_from_list', start_offset, code, code_spec)
if code_spec is None:
msg = 'Code ' + str(code) + ' is not recognized.'
raise Exception(msg)
special = code_spec.get('special')
if special is None:
if code_spec.get('size') != "variable":
return start_offset+code_spec.get('size'), \
_get_value(data_list[start_offset:start_offset+\
code_spec.get('size')],\
code_spec.get('format')
)
else:
raise Exception()
if special:
if code == 1:
raise Exception()
'''
v1 = ord(data_list[start_offset:start_offset+1])
v2 = ord(data_list[start_offset+1:start_offset+2])
result = bin(v1)[2:].zfill(8)
result += bin(v2)[2:].zfill(8)
fraction = result[1:12]
exponent = result[12:16]
if result[0] == '0':
exponent = int(exponent, 2)
fraction = int(fraction, 2) / 2. ** 23
value = fraction * 2. ** (exponent)
else:
converted_exponent = ''
for i in range(8):
if exponent[i] == '0':
converted_exponent += '1'
else:
converted_exponent += '0'
exponent = int(converted_exponent, 2)
converted_fraction = ''
achou = False
for i in range(22, -1, -1):
if achou:
if fraction[i] == '0':
converted_fraction = '1' + converted_fraction
else:
converted_fraction = '0' + converted_fraction
else:
converted_fraction = fraction[i] + converted_fraction
if fraction[i] == '1':
achou = True
fraction = int(converted_fraction, 2) / 2. ** 23
fraction = fraction * (-1)
value = fraction * 2. ** (exponent - 128)
return start_offset+2, value
'''
elif code == 2:
values = []
for i in range(4):
v = ord(data_list[start_offset+i:start_offset+i+1])
values.append(v)
result = ''
for value in values:
result += bin(value)[2:].zfill(8)
exponent = result[1:9]
mantissa = result[9:32]
exponent = int(exponent, 2)
mantissa = int(mantissa, 2) / 2. ** 23
if result[0] == '1':
value = -(1 + mantissa) * 2. ** (exponent - 127)
else:
value = (1 + mantissa) * 2. ** (exponent - 127)
return start_offset+4, value
elif code == 3:
new_offset, V = get_from_list(data_list, start_offset, 2) # FSINGL
new_offset, A = get_from_list(data_list, new_offset, 2) # FSINGL
# V is a nominal value with a confidence interval of [V - A, V + A]
return new_offset, V, A
elif code == 4:
new_offset, V = get_from_list(data_list, start_offset, 2) # FSINGL
new_offset, A = get_from_list(data_list, new_offset, 2) # FSINGL
new_offset, B = get_from_list(data_list, new_offset, 2) # FSINGL
# V is a nominal value with a confidence interval of [V - A, V + B]
return new_offset, V, A, B
elif code == 5:
values = []
for i in range(4):
v = ord(data_list[start_offset+i:start_offset+i+1])
values.append(v)
result = ''
for value in values:
result += bin(value)[2:].zfill(8)
exponent = result[1:8]
mantissa = result[8:32]
exponent = int(exponent, 2)
mantissa = int(mantissa, 2) / 2. ** 24  # normalize the 24-bit fraction (ISINGL, IBM single precision)
if result[0] == '1':
value = -1 * mantissa * 16. ** (exponent - 64)
else:
value = mantissa * 16. ** (exponent - 64)
return start_offset+4, value
#
elif code == 6:
raise Exception('code == 6!!!')
#
elif code == 7:
values = []
for i in range(8):
v = ord(data_list[start_offset+i:start_offset+i+1])
values.append(v)
result = ''
for value in values:
result += bin(value)[2:].zfill(8)
exponent = result[1:12]
mantissa = result[12:64]
exponent = int(exponent, 2)
mantissa = int(mantissa, 2) / 2. ** 52  # normalize the 52-bit fraction (FDOUBL, IEEE 754 double)
if result[0] == '1':
value = -1 * (1 + mantissa) * 2. ** (exponent - 1023)
else:
value = (1 + mantissa) * 2. ** (exponent - 1023)
return start_offset+8, value
elif code == 8:
new_offset, V = get_from_list(data_list, start_offset, 7) # FDOUBL
new_offset, A = get_from_list(data_list, new_offset, 7) # FDOUBL
# V is a nominal value with a confidence interval of [V - A, V + A]
return new_offset, V, A
elif code == 9:
new_offset, V = get_from_list(data_list, start_offset, 7) # FDOUBL
new_offset, A = get_from_list(data_list, new_offset, 7) # FDOUBL
new_offset, B = get_from_list(data_list, new_offset, 7) # FDOUBL
# V is a nominal value with a confidence interval of [V - A, V + B]
return new_offset, V, A, B
elif code == 10:
new_offset, R = get_from_list(data_list, start_offset, 2) # FSINGL
new_offset, I = get_from_list(data_list, new_offset, 2) # FSINGL
# Value = R + i* I, i = (-1)1/2
return new_offset, R, I
elif code == 11:
new_offset, R = get_from_list(data_list, start_offset, 7) # FDOUBL
new_offset, I = get_from_list(data_list, new_offset, 7) # FDOUBL
# Value = R + i* I, i = (-1)1/2
return new_offset, R, I
elif code == 18 or code == 22:
#print data_list[start_offset:start_offset+1]
try:
bin_vec = bin(ord(data_list[start_offset:start_offset+1]))[2:].zfill(8)
except Exception:
print('start_offset:', start_offset, len(data_list))
raise Exception('Check IndexError')
if bin_vec[0] == '0':
return start_offset+1, int(bin_vec, 2)
else:
if bin_vec[1] == '0':
bin_vec = '0' + bin_vec[1:]
bin_vec += bin(ord(data_list[start_offset+1:start_offset+2]))[2:].zfill(8)
return start_offset+2, int(bin_vec, 2)
else:
bin_vec = '00' + bin_vec[2:]
bin_vec += bin(ord(data_list[start_offset+1:start_offset+2]))[2:].zfill(8)
bin_vec += bin(ord(data_list[start_offset+2:start_offset+3]))[2:].zfill(8)
bin_vec += bin(ord(data_list[start_offset+3:start_offset+4]))[2:].zfill(8)
return start_offset+4, int(bin_vec, 2)
elif code == 19 or code == 27:
new_offset, value = get_from_list(data_list, start_offset, 15) # USHORT
return new_offset+value, \
data_list[new_offset:new_offset+value].decode("utf-8")
elif code == 20:
new_offset, value = get_from_list(data_list, start_offset, 18) # UVARI
return new_offset+value, \
data_list[new_offset:new_offset+value].decode("utf-8")
elif code == 21:
dtime = OrderedDict()
new_offset, year = get_from_list(data_list, start_offset, 15) # USHORT
year = 1900 + year
dtime['Y'] = year
v1 = ord(data_list[new_offset:new_offset+1])
new_offset += 1
result = bin(v1)[2:].zfill(8)
tz = result[0:4]
m = result[4:8]
dtime['TZ'] = tz
dtime['M'] = m
new_offset, day = get_from_list(data_list, new_offset, 15) # USHORT
dtime['D'] = day
new_offset, hours = get_from_list(data_list, new_offset, 15) # USHORT
dtime['H'] = hours
new_offset, minutes = get_from_list(data_list, new_offset, 15) # USHORT
dtime['MN'] = minutes
new_offset, seconds = get_from_list(data_list, new_offset, 15) # USHORT
dtime['S'] = seconds
new_offset, milliseconds = get_from_list(data_list, new_offset, 16) # UNORM
dtime['MS'] = milliseconds
return new_offset, dtime
elif code == 23:
new_offset, O = get_from_list(data_list, start_offset, 22) # ORIGIN
new_offset, C = get_from_list(data_list, new_offset, 15) # USHORT
new_offset, I = get_from_list(data_list, new_offset, 19) # IDENT
return new_offset, (O, C, I)
# O = Origin Reference
# C = Copy Number
# I = Identifier
elif code == 24:
new_offset, T = get_from_list(data_list, start_offset, 19) # IDENT
new_offset, N = get_from_list(data_list, new_offset, 23) # OBNAME
objref = OrderedDict()
objref['T'] = T
objref['N'] = N
# T = obj type - N = obj name
return new_offset, objref
elif code == 25:
new_offset, T = get_from_list(data_list, start_offset, 19) # IDENT
new_offset, N = get_from_list(data_list, new_offset, 23) # OBNAME
new_offset, L = get_from_list(data_list, new_offset, 19) # IDENT
raise Exception()
# T = Object Type
# N = Object Name
# L = Attribute Label
elif code == 26:
new_offset, value = get_from_list(data_list, start_offset, 15) # USHORT
if value == 0:
return new_offset, False
if value == 1:
return new_offset, True
raise Exception()
elif code == 28:
v1 = ord(data_list[start_offset:start_offset+1])
result = bin(v1)[2:].zfill(8)
ret = []
for i in range(len(result)):
ret.append(int(result[i]))
return start_offset+1, ret
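# Illustrative sketch (comments only, assuming RepresentationCodes.start() has
# already loaded the JSON code table): get_from_list() always threads an
# offset, returning (new_offset, value), so successive fields are read by
# feeding the returned offset back in, as the DTIME (code 21) branch above does:
#
#   offset = 0
#   offset, year = get_from_list(buf, offset, 15)    # USHORT, 1 byte
#   offset, count = get_from_list(buf, offset, 18)   # UVARI, 1, 2 or 4 bytes
#
# The UVARI branch shows the length encoding: a leading '0' bit means a
# one-byte value, '10' a two-byte value and '11' a four-byte value.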
"""
0: Logical Record Structure
0 = Indirectly Formatted Logical Record
1 = Explicitly Formatted Logical Record
1: Predecessor
0 = This is the first segment of the Logical Record
1 = This is not the first segment of the Logical Record
2: Successor
0 = This is the last Segment of the Logical Record.
1 = This is not the last Segment of the Logical Record
3: Encryption
0 = No encryption.
1 = Logical Record is encrypted
4: Encryption Packet
0 = No Logical Record Segment Encryption Packet
1 = Logical Record Segment Encryption Packet is present
5: Checksum
0 = No checksum
1 = A checksum is present in the LRST
6: Trailing Length
0 = No Trailing Length
1 = A copy of the LRS length is present in the LRST
7: Padding
0 = No record padding
1 = Pad bytes are present in LRST
"""
"""
Given a Explicitly Formatted Logical Record (EFLR) code, returns its type,
description and allowed set types.
"""
'''
def get_EFLR_for_code(EFLR_code):
if not isinstance(EFLR_code, int):
raise Exception('EFLR_code must be a int value.')
if EFLR_code < 0 or EFLR_code > 127:
raise Exception('EFLR code does not exist.')
if EFLR_code > 11:
raise Exception('Undefined or reserved EFLR code are not available at this time.')
ret = {}
if EFLR_code == 0:
ret['type'] = 'FHLR'
ret['desc'] = 'File Header'
ret['allow'] = ['FILE-HEADER']
elif EFLR_code == 1:
ret['type'] = 'OLR'
ret['desc'] = 'Origin'
ret['allow'] = ['ORIGIN', 'WELL-REFERENCE']
elif EFLR_code == 2:
ret['type'] = 'AXIS'
ret['desc'] = 'Coordinate Axis'
ret['allow'] = ['AXIS']
elif EFLR_code == 3:
ret['type'] = 'CHANNL'
ret['desc'] = 'Channel-related information'
ret['allow'] = ['CHANNEL']
elif EFLR_code == 4:
ret['type'] = 'FRAME'
ret['desc'] = 'Frame Data'
ret['allow'] = ['FRAME', 'PATH']
elif EFLR_code == 5:
ret['type'] = 'STATIC'
ret['desc'] = 'Static Data'
ret['allow'] = ['CALIBRATION', 'CALIBRATION-COEFFICIENT', \
'CALIBRATION-MEASUREMENT', 'COMPUTATION', 'EQUIPMENT', 'GROUP',\
'PARAMETER', 'PROCESS', 'SPICE', 'TOOL', 'ZONE']
elif EFLR_code == 6:
ret['type'] = 'SCRIPT'
ret['desc'] = 'Textual Data'
ret['allow'] = ['COMMENT']
elif EFLR_code == 7:
ret['type'] = 'UPDATE'
ret['desc'] = 'Update Data'
ret['allow'] = ['UPDATE']
elif EFLR_code == 8:
ret['type'] = 'UDI'
ret['desc'] = 'Unformatted Data Identifier'
ret['allow'] = ['NO-FORMAT']
elif EFLR_code == 9:
ret['type'] = 'LNAME'
ret['desc'] = 'Long Name'
ret['allow'] = ['LONG-NAME']
elif EFLR_code == 10:
ret['type'] = 'SPEC'
ret['desc'] = 'Specification'
ret['allow'] = ['ATTRIBUTE', 'CODE', 'EFLR', 'IFLR', 'OBJECT-TYPE',\
'REPRESENTATION-CODE', 'SPECIFICATION', 'UNIT-SYMBOL']
elif EFLR_code == 11:
ret['type'] = 'DICT'
ret['desc'] = 'Dictionary'
ret['allow'] = ['BASE-DICTIONARY', 'IDENTIFIER', 'LEXICON', 'OPTION']
return ret
'''
def get_objname_from_tuple(obj_name_tuple):
"""Given a O, C, I tuple, return its string full name
(e.g 0&0&DEFINING_ORIGIN).
"""
O, C, I = obj_name_tuple
return str(O) + '&' + str(C) + '&' + I
def get_actual_objname(full_object_name):
"""Given a object string full name (e.g 0&0&DEFINING_ORIGIN), returns
its name (e.g DEFINING_ORIGIN).
"""
return full_object_name.split('&')[2]
class RepresentationCodes(object):
instance = None
def __init__(self):
# base_path == this folder
base_path = os.path.dirname(os.path.abspath(__file__))
rc_json_file = 'representation_codes.json'
self.codes = app.app_utils.read_json_file(
os.path.join(base_path, rc_json_file)
)
@classmethod
def start(cls):
if cls.instance is None:
cls.instance = RepresentationCodes()
@classmethod
def get_code(cls, code):
val = None
if cls.instance:
val = cls.instance.codes[code-1]
return val
class DLISObjectPool(object):
current_file_number = -1
current_lr = -1
lrs = None
objects = None
lr_to_object = None
object_to_lr = None
@classmethod
def init_pool(cls):
"""Init DLISObjectPool attributes.
"""
cls.current_file_number = -1
cls.current_lr = -1
cls.lrs = OrderedDict()
cls.objects = OrderedDict()
cls.lr_to_object = OrderedDict()
cls.object_to_lr = OrderedDict()
@classmethod
def register_logical_record(cls, lr_structure_type, lr_type, lr_code):
"""Register a new Logical Record, with its structure type, LR type,
LR code.
"""
if lr_structure_type != 0 and lr_structure_type != 1:
raise Exception('Logical Record Structure type invalid. ' +
'Valid types are 0 for IFLRs or 1 for EFLR.')
# Starting a new logical file
if lr_type == 'FILE-HEADER':
if cls.lrs is None:
cls.init_pool()
cls.current_file_number += 1
cls.lrs[cls.current_file_number] = OrderedDict()
cls.lr_to_object[cls.current_file_number] = OrderedDict()
cls.object_to_lr[cls.current_file_number] = OrderedDict()
cls.current_lr = 0
else:
cls.current_lr += 1
new_set = OrderedDict()
new_set['type'] = lr_type
new_set['code'] = lr_code
new_set['structure_type'] = lr_structure_type
new_set['template'] = []
new_set['closed'] = False
cls.lrs.get(cls.current_file_number)[cls.current_lr] = new_set
cls.lr_to_object.get(cls.current_file_number)[lr_type] = []
@classmethod
def register_object(cls, object_name):
"""Register a new DLIS Object, with its name.
"""
if not cls.get_logical_records()[-1].get('closed'):
cls.get_logical_records()[-1]['closed'] = True
if cls.objects.get(cls.current_file_number) is None:
cls.objects[cls.current_file_number] = OrderedDict()
cls.objects.get(cls.current_file_number)[object_name] = []
current_lr = cls.get_logical_records()[-1]
cls.object_to_lr.get(cls.current_file_number)[object_name] = current_lr.get('type')
cls.lr_to_object.get(cls.current_file_number).get(current_lr.get('type')).append(object_name)
@classmethod
def get_logical_records(cls, file_number=None):
if file_number is None:
file_number = cls.current_file_number
return list(cls.lrs.get(file_number).values())
@classmethod
def get_logical_record(cls, lr_type, file_number=None):
for lr in cls.get_logical_records(file_number):
if lr.get('type') == lr_type:
return lr
return None
@classmethod
def get_objects_of_type(cls, lr_type, file_number=None):
if file_number is None:
file_number = cls.current_file_number
obj_names = cls.lr_to_object.get(file_number).get(lr_type)
ret_map = OrderedDict()
if not obj_names:
return ret_map
for obj_name in obj_names:
ret_map[obj_name] = cls.objects.get(cls.current_file_number).get(obj_name)
return ret_map
@classmethod
def get_objects_dict_of_type(cls, lr_type, file_number=None):
if file_number is None:
file_number = cls.current_file_number
ret_map = OrderedDict()
objects = cls.get_objects_of_type(lr_type, file_number)
if not objects:
return ret_map
template_list = cls.get_logical_record(lr_type, file_number).get('template')
for obj_name, obj_values in objects.items():
obj_map = OrderedDict()
for idx, value in enumerate(obj_values):
#print 'idx', idx, template_list[idx]
obj_map[template_list[idx].get('name')] = value
ret_map[obj_name] = obj_map
return ret_map
@classmethod
def get_object_values_list(cls, object_name, file_number=None):
"""Given a object name (e.g 0&0&WN or 1&0&RHOB) return its values list.
If file_number is not given, the latest one will be used.
"""
if file_number is None:
file_number = cls.current_file_number
obj_values_list = cls.objects.get(file_number).get(object_name)
return obj_values_list
@classmethod
def get_object_values_dict(cls, object_name, file_number=None):
if file_number is None:
file_number = cls.current_file_number
obj_values_list = cls.get_object_values_list(object_name, file_number)
if obj_values_list is None:
return None
lr_type = cls.object_to_lr.get(file_number).get(object_name)
ret_map = OrderedDict()
for set_map in list(cls.lrs.get(file_number).values()):
if set_map.get('type') == lr_type:
for idx, template in enumerate(set_map.get('template')):
try:
ret_map[template.get('name')] = obj_values_list[idx]
except IndexError:
return ret_map
return ret_map
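# Illustrative sketch (comments only, not part of the original reader): one
# plausible call sequence suggested by the DLISObjectPool methods above. The
# literal arguments are made up for illustration.
#
#   DLISObjectPool.init_pool()
#   DLISObjectPool.register_logical_record(1, 'FILE-HEADER', 0)  # starts logical file 0
#   DLISObjectPool.register_logical_record(1, 'CHANNEL', 3)
#   DLISObjectPool.register_object('0&0&GR')
#   channels = DLISObjectPool.get_objects_dict_of_type('CHANNEL')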
def _get_SUL(data):
# Getting Storage Unit Label (SUL)
if len(data) != 80 and len(data) != 128:
raise Exception('Input data size is not as expected (expected 80 or 128 bytes).')
SUL = OrderedDict()
SUL['Storage unit sequence number'] = data[0:4].decode("utf-8").strip()
SUL['RP66 version and format edition'] = data[4:9].decode("utf-8").strip()
SUL['Storage unit structure'] = data[9:15].decode("utf-8").strip()
if SUL.get('RP66 version and format edition').split('.')[0] == 'V1':
SUL['Maximum visible record length'] = data[15:20].decode("utf-8").strip()
SUL['Storage set identifier'] = data[20:80].decode("utf-8").strip()
elif SUL.get('RP66 version and format edition').split('.')[0] == 'V2':
if len(data) == 80:
raise Exception('DLIS version 2 needs 128 bytes for Storage Unit Label (SUL).')
SUL['Binding edition'] = data[15:19].decode("utf-8").strip()
SUL['Maximum visible record length'] = data[19:29].decode("utf-8").strip()
SUL['Producer organization code'] = data[29:39].decode("utf-8").strip()
SUL['Creation date'] = data[39:50].decode("utf-8").strip()
SUL['Serial number'] = data[50:62].decode("utf-8").strip()
SUL['reserved'] = data[62:68].decode("utf-8").strip()
SUL['Storage set identifier'] = data[68:128].decode("utf-8").strip()
return SUL
class DLISFile(object):
def __init__(self):
RepresentationCodes.start()
# base_path == this folder
base_path = os.path.dirname(os.path.abspath(__file__))
mapping_file = 'DLIS_RP66V1_MAPPING.json'
self.mapping = app.app_utils.read_json_file(
os.path.join(base_path, mapping_file)
)
#
self._clear()
def _clear(self):
#
DLISObjectPool.init_pool()
#
self.SUL = None
self.file_size = -1
self.data = None
self.data_props = None
#
self.file_header = None
self.origin = None
self.parameter = None
self.frame = None
self.channel = None
#
#self.queue = Queue()
#
def get_file_read_percent(self):
if self.file_size == -1:
return 0
return float(self.file.tell())*100/self.file_size
@staticmethod
def is_DLIS_file(filename):
try:
file_ = open(filename, mode='rb')
# Getting Storage Unit Label (SUL)
SUL = _get_SUL(file_.read(128))
file_.close()
if SUL.get('RP66 version and format edition').split('.')[0] != 'V1' \
and SUL.get('RP66 version and format edition').split('.')[0] != 'V2':
return False
return True
except Exception:
return False
def print_logical_file(self, file_index=None, limit=None):
if file_index is None:
file_index = range(len(self.data))
elif file_index == -1:
file_index = range(len(self.data)-1, len(self.data), 1)
elif file_index >= 0 and file_index < len(self.data):
file_index = range(file_index, file_index+1, 1)
else:
raise Exception()
if limit is not None:
counter = 1
for idx in file_index:
datum = self.data[idx]
print('\n\nLogical File:', idx)
for object_name, object_dict in datum.items():
print('\n', object_name)
for data_idx, data_values in object_dict.items():
print('\n ', data_idx)
for i, v in enumerate(data_values):
print(' ', list(self.data_props[idx].get(object_name).keys())[i], \
': ', v, list(self.data_props[idx].get(object_name).values())[i].get('UNITS'))
if limit is not None:
if counter == limit:
msg = '\nLimit of ' + str(limit) + ' registers was reached. End of print.'
print(msg)
return
else:
counter += 1
print('\nEnd of print.')
'''
def read(self, filename, callback=None, threading_stop_event=None):
#t = threading.Thread(target=self._read, args=(filename, callback))
#t.start()
#t.join()
p = Process(target=self._read, args=(filename, callback))
p.start()
p.join()
'''
def read(self, filename, callback=None, threading_stop_event=None):
# Clear DLISObjectPool
DLISObjectPool.init_pool()
#
self.filename = filename
#self.callback = callback
self.file = open(self.filename, mode='rb')
self.file_size = os.fstat(self.file.fileno()).st_size
# Getting Storage Unit Label (SUL)
self.SUL = _get_SUL(self.file.read(128))
# print()
# print(self.SUL)
# print()
if self.SUL.get('RP66 version and format edition').split('.')[0] == 'V1':
self.file.seek(80)
elif self.SUL.get('RP66 version and format edition').split('.')[0] != 'V2':
raise Exception('This is not a DLIS File.')
#
self._read_Logical_Records(callback, threading_stop_event)
#self._reading_process = Process(target=self._read_Logical_Records,
# args=(stop_event, 'task'))
#print 'a', self.file.tell()
#self._reading_process.start()
#print 'b', self.file.tell(), self._reading_process.is_alive()
#self._reading_process.join()
#print 'c', self.file.tell(), self._reading_process.is_alive()
#
self.file.close()
#
self._load_file_header_props()
self._load_origin_props()
self._load_parameter_props()
self._load_frame_props()
self._load_channel_props()
#
if threading_stop_event:
if threading_stop_event.is_set():
print('File reading canceled by user.')
else:
self.print_logical_file(-1, 1)
else:
self.print_logical_file(-1, 1)
#
print('\n\nself.data_props')
print(self.data_props)
# TODO: rever self._curves_info
self._curves_info = OrderedDict()
for item_od in self.data_props:
for curve_set_name in list(item_od.keys()):
curve_info_od = item_od[curve_set_name]
curve_set_name = get_actual_objname(curve_set_name)
self._curves_info[curve_set_name] = []
for curve_name, curve_props_od in curve_info_od.items():
curve_actual_name = get_actual_objname(curve_name)
curve_unit = curve_props_od['UNITS'].lower()
self._curves_info[curve_set_name].append(
(curve_actual_name, curve_unit)
)
#
print('\n\nself._curves_info')
print(self._curves_info)
#
# print('\n\nself.data')
# print(self.data)
#
# """
# TODO: rever self._curves_data
self._curves_data = OrderedDict()
for curve_set_name, curves_info_list in self._curves_info.items():
self._curves_data[curve_set_name] = []
for idx in range(len(curves_info_list)):
self._curves_data[curve_set_name].append([])
#
for item_od in self.data:
for iflr_descriptor in list(item_od.keys()):
curve_data_od = item_od[iflr_descriptor]
curve_set_name = get_actual_objname(iflr_descriptor)
for curves_data_list in list(curve_data_od.values()):
for idx, value in enumerate(curves_data_list):
# print('idx val:', idx, value)
self._curves_data[curve_set_name][idx].append(value)
#
for curves_data_list in list(self._curves_data.values()):
for idx in range(len(curves_data_list)):
curves_data_list[idx] = np.asarray(curves_data_list[idx])
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import math
import matplotlib.ticker as plticker
from matplotlib import cm
from matplotlib.colors import LogNorm
from matplotlib import rc, rcParams
from scipy import interpolate
rc('text',usetex=True)
rc('font',**{'family':'serif','serif':['DejaVu Serif Display']})
plt.rcParams.update({'font.size': 20})
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
r0 = 12.
z0 = 1.*r0+2.
rho = 0.15
gamma_0 = 1.09
zmax = 1.e8
gamma_acc = np.array([4,8,18])
z_acc = np.array([1e3,1e4,1e5])
sigma_f = np.array([1.,0.5,0.1])
sigma_0_1 = (1.+sigma_f[0])*gamma_acc[0]/gamma_0-1.
sigma_0_2 = (1.+sigma_f[1])*gamma_acc[1]/gamma_0-1.
sigma_0_3 = (1.+sigma_f[2])*gamma_acc[2]/gamma_0-1.
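# The three sigma_0 values above follow from assuming that the total energy
# flux per unit rest-mass energy flux, mu = Gamma * (1 + sigma), is conserved
# along the jet, so Gamma_0 * (1 + sigma_0) = Gamma_acc * (1 + sigma_f);
# solving for sigma_0 gives the expressions used here.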
z_1 = np.logspace(0,np.log10(z_acc[0]),30)
z_2 = np.logspace(0,np.log10(z_acc[1]),40)
z_3 = np.logspace(0,np.log10(z_acc[2]),50)
z_1_post = np.logspace(np.log10(z_acc[0]),np.log10(zmax),70)
z_2_post = np.logspace(np.log10(z_acc[1]),np.log10(zmax),60)
z_3_post = np.logspace(np.log10(z_acc[2]),np.log10(zmax),50)
gamma_1 = gamma_0 + (gamma_acc[0]-gamma_0)*(z_1**(1./2.)-z0**(1./2.))/(z_acc[0]**(1./2.)-z0**(1./2.))
gamma_2 = gamma_0 + (gamma_acc[1]-gamma_0)*(z_2**(1./2.)-z0**(1./2.))/(z_acc[1]**(1./2.)-z0**(1./2.))
gamma_3 = gamma_0 + (gamma_acc[2]-gamma_0)*(z_3**(1./2.)-z0**(1./2.))/(z_acc[2]**(1./2.)-z0**(1./2.))
gamma_1_post = np.zeros(70)+gamma_acc[0]
gamma_2_post = np.zeros(60)+gamma_acc[1]
gamma_3_post = np.zeros(50)+gamma_acc[2]
sigma_1 = (gamma_0/gamma_1)*(1.+sigma_0_1)-1.
sigma_2 = (gamma_0/gamma_2)*(1.+sigma_0_2)-1.
sigma_3 = (gamma_0/gamma_3)*(1.+sigma_0_3)-1.
sigma_1_post = np.zeros(70)+sigma_f[0]
sigma_2_post = np.zeros(60)+sigma_f[1]
sigma_3_post = np.zeros(50)+sigma_f[2]
theta_1 = rho/gamma_1
theta_2 = rho/gamma_2
theta_3 = rho/gamma_3
r_1 = r0 + (z_1-z0)*np.tan(theta_1)
r_2 = r0 + (z_2-z0)*np.tan(theta_2)
r_3 = r0 + (z_3-z0)*np.tan(theta_3)
r_1_post = r_1[29] + (z_1_post-z_acc[0])*np.tan(rho/gamma_acc[0])
r_2_post = r_2[39] + (z_2_post-z_acc[1])*np.tan(rho/gamma_acc[1])
import warnings
import numpy as np
import pandas as pd
import cvxpy as cp
import pytest
import scipy.optimize as sco
from pypfopt import EfficientFrontier
from pypfopt import risk_models
from pypfopt import objective_functions
from pypfopt import exceptions
from tests.utilities_for_tests import get_data, setup_efficient_frontier
def test_data_source():
df = get_data()
assert isinstance(df, pd.DataFrame)
assert df.shape[1] == 20
assert len(df) == 7126
assert df.index.is_all_dates
def test_returns_dataframe():
df = get_data()
returns_df = df.pct_change().dropna(how="all")
assert isinstance(returns_df, pd.DataFrame)
assert returns_df.shape[1] == 20
assert len(returns_df) == 7125
assert returns_df.index.is_all_dates
assert not ((returns_df > 1) & returns_df.notnull()).any().any()
def test_efficient_frontier_inheritance():
ef = setup_efficient_frontier()
assert ef.clean_weights
assert ef.n_assets
assert ef.tickers
assert isinstance(ef._constraints, list)
assert isinstance(ef._lower_bounds, np.ndarray)
assert isinstance(ef._upper_bounds, np.ndarray)
def test_portfolio_performance():
ef = setup_efficient_frontier()
with pytest.raises(ValueError):
ef.portfolio_performance()
ef.min_volatility()
perf = ef.portfolio_performance()
assert isinstance(perf, tuple)
assert len(perf) == 3
assert isinstance(perf[0], float)
def test_min_volatility():
ef = setup_efficient_frontier()
w = ef.min_volatility()
assert isinstance(w, dict)
assert set(w.keys()) == set(ef.tickers)
np.testing.assert_almost_equal(ef.weights.sum(), 1)
assert all([i >= 0 for i in w.values()])
np.testing.assert_allclose(
ef.portfolio_performance(),
(0.17931232481259154, 0.15915084514118694, 1.00101463282373),
)
def test_min_volatility_different_solver():
ef = setup_efficient_frontier()
ef.solver = "ECOS"
w = ef.min_volatility()
assert isinstance(w, dict)
assert set(w.keys()) == set(ef.tickers)
np.testing.assert_almost_equal(ef.weights.sum(), 1)
assert all([i >= 0 for i in w.values()])
test_performance = (0.179312, 0.159151, 1.001015)
np.testing.assert_allclose(ef.portfolio_performance(), test_performance, atol=1e-5)
ef = setup_efficient_frontier()
ef.solver = "OSQP"
w = ef.min_volatility()
np.testing.assert_allclose(ef.portfolio_performance(), test_performance, atol=1e-5)
ef = setup_efficient_frontier()
ef.solver = "SCS"
w = ef.min_volatility()
np.testing.assert_allclose(ef.portfolio_performance(), test_performance, atol=1e-3)
def test_min_volatility_no_rets():
# Should work with no rets, see issue #82
df = get_data()
S = risk_models.sample_cov(df)
ef = EfficientFrontier(None, S)
w = ef.min_volatility()
assert isinstance(w, dict)
assert set(w.keys()) == set(ef.tickers)
np.testing.assert_almost_equal(ef.weights.sum(), 1)
assert all([i >= 0 for i in w.values()])
np.testing.assert_almost_equal(ef.portfolio_performance()[1], 0.15915084514118694)
def test_min_volatility_tx_costs():
# Baseline
ef = setup_efficient_frontier()
ef.min_volatility()
w1 = ef.weights
# Pretend we were initially equal weight
ef = setup_efficient_frontier()
prev_w = np.array([1 / ef.n_assets] * ef.n_assets)
ef.add_objective(objective_functions.transaction_cost, w_prev=prev_w)
ef.min_volatility()
w2 = ef.weights
# TX cost should pull closer to prev portfolio
assert np.abs(prev_w - w2).sum() < np.abs(prev_w - w1).sum()
def test_min_volatility_short():
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(None, None)
)
w = ef.min_volatility()
assert isinstance(w, dict)
assert set(w.keys()) == set(ef.tickers)
np.testing.assert_almost_equal(ef.weights.sum(), 1)
np.testing.assert_allclose(
ef.portfolio_performance(),
(0.1721356467349655, 0.1555915367269669, 0.9777887019776287),
)
# Shorting should reduce volatility
volatility = ef.portfolio_performance()[1]
ef_long_only = setup_efficient_frontier()
ef_long_only.min_volatility()
long_only_volatility = ef_long_only.portfolio_performance()[1]
assert volatility < long_only_volatility
def test_min_volatility_L2_reg():
ef = setup_efficient_frontier()
ef.add_objective(objective_functions.L2_reg, gamma=1)
weights = ef.min_volatility()
assert isinstance(weights, dict)
assert set(weights.keys()) == set(ef.tickers)
np.testing.assert_almost_equal(ef.weights.sum(), 1)
assert all([i >= 0 for i in weights.values()])
ef2 = setup_efficient_frontier()
ef2.min_volatility()
# L2_reg should pull close to equal weight
equal_weight = np.full((ef.n_assets,), 1 / ef.n_assets)
assert (
np.abs(equal_weight - ef.weights).sum()
< np.abs(equal_weight - ef2.weights).sum()
)
np.testing.assert_allclose(
ef.portfolio_performance(),
(0.23129890623344232, 0.1955254118258614, 1.080672349748733),
)
def test_min_volatility_L2_reg_many_values():
ef = setup_efficient_frontier()
ef.min_volatility()
# Count the number of weights that are more than 1%
initial_number = sum(ef.weights > 0.01)
for _ in range(10):
ef.add_objective(objective_functions.L2_reg, gamma=0.05)
ef.min_volatility()
np.testing.assert_almost_equal(ef.weights.sum(), 1)
new_number = sum(ef.weights > 0.01)
# Higher gamma should reduce the number of small weights
assert new_number >= initial_number
initial_number = new_number
def test_min_volatility_L2_reg_limit_case():
ef = setup_efficient_frontier()
ef.add_objective(objective_functions.L2_reg, gamma=1e10)
ef.min_volatility()
equal_weights = np.array([1 / ef.n_assets] * ef.n_assets)
np.testing.assert_array_almost_equal(ef.weights, equal_weights)
def test_min_volatility_L2_reg_increases_vol():
# L2 reg should reduce the number of small weights
# but increase in-sample volatility.
ef_no_reg = setup_efficient_frontier()
ef_no_reg.min_volatility()
vol_no_reg = ef_no_reg.portfolio_performance()[1]
ef = setup_efficient_frontier()
ef.add_objective(objective_functions.L2_reg, gamma=2)
ef.min_volatility()
vol = ef.portfolio_performance()[1]
assert vol > vol_no_reg
def test_min_volatility_tx_costs_L2_reg():
ef = setup_efficient_frontier()
prev_w = np.array([1 / ef.n_assets] * ef.n_assets)
ef.add_objective(objective_functions.transaction_cost, w_prev=prev_w)
ef.add_objective(objective_functions.L2_reg)
ef.min_volatility()
np.testing.assert_allclose(
ef.portfolio_performance(),
(0.2316565265271545, 0.1959773703677164, 1.0800049318450338),
)
def test_min_volatility_cvxpy_vs_scipy():
# cvxpy
ef = setup_efficient_frontier()
ef.min_volatility()
w1 = ef.weights
# scipy
args = (ef.cov_matrix,)
initial_guess = np.array([1 / ef.n_assets] * ef.n_assets)
result = sco.minimize(
objective_functions.portfolio_variance,
x0=initial_guess,
args=args,
method="SLSQP",
bounds=[(0, 1)] * 20,
constraints=[{"type": "eq", "fun": lambda x: np.sum(x) - 1}],
)
w2 = result["x"]
cvxpy_var = objective_functions.portfolio_variance(w1, ef.cov_matrix)
scipy_var = objective_functions.portfolio_variance(w2, ef.cov_matrix)
assert cvxpy_var <= scipy_var
def test_min_volatility_sector_constraints():
sector_mapper = {
"T": "auto",
"UAA": "airline",
"SHLD": "retail",
"XOM": "energy",
"RRC": "energy",
"BBY": "retail",
"MA": "fig",
"PFE": "pharma",
"JPM": "fig",
"SBUX": "retail",
"GOOG": "tech",
"AAPL": "tech",
"FB": "tech",
"AMZN": "tech",
"BABA": "tech",
"GE": "utility",
"AMD": "tech",
"WMT": "retail",
"BAC": "fig",
"GM": "auto",
}
sector_upper = {
"tech": 0.2,
"utility": 0.1,
"retail": 0.2,
"fig": 0.4,
"airline": 0.05,
"energy": 0.2,
}
sector_lower = {"utility": 0.01, "fig": 0.02, "airline": 0.01}
# ef = setup_efficient_frontier()
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(None, None)
)
ef.add_sector_constraints(sector_mapper, sector_lower, sector_upper)
weights = ef.min_volatility()
for sector in list(set().union(sector_upper, sector_lower)):
sector_sum = 0
for t, v in weights.items():
if sector_mapper[t] == sector:
sector_sum += v
assert sector_sum <= sector_upper.get(sector, 1) + 1e-5
assert sector_sum >= sector_lower.get(sector, 0) - 1e-5
def test_min_volatility_vs_max_sharpe():
# Test based on issue #75
expected_returns_daily = pd.Series(
[0.043622, 0.120588, 0.072331, 0.056586], index=["AGG", "SPY", "GLD", "HYG"]
)
covariance_matrix = pd.DataFrame(
[
[0.000859, -0.000941, 0.001494, -0.000062],
[-0.000941, 0.022400, -0.002184, 0.005747],
[0.001494, -0.002184, 0.011518, -0.000129],
[-0.000062, 0.005747, -0.000129, 0.002287],
],
index=["AGG", "SPY", "GLD", "HYG"],
columns=["AGG", "SPY", "GLD", "HYG"],
)
ef = EfficientFrontier(expected_returns_daily, covariance_matrix)
ef.min_volatility()
vol_min_vol = ef.portfolio_performance(risk_free_rate=0.00)[1]
ef = EfficientFrontier(expected_returns_daily, covariance_matrix)
ef.max_sharpe(risk_free_rate=0.00)
vol_max_sharpe = ef.portfolio_performance(risk_free_rate=0.00)[1]
assert vol_min_vol < vol_max_sharpe
def test_min_volatility_nonconvex_objective():
ef = setup_efficient_frontier()
ef.add_objective(lambda x: cp.sum((x + 1) / (x + 2) ** 2))
with pytest.raises(exceptions.OptimizationError):
ef.min_volatility()
def test_min_volatility_nonlinear_constraint():
ef = setup_efficient_frontier()
ef.add_constraint(lambda x: (x + 1) / (x + 2) ** 2 <= 0.5)
with pytest.raises(exceptions.OptimizationError):
ef.min_volatility()
def test_max_sharpe_long_only():
ef = setup_efficient_frontier()
w = ef.max_sharpe()
assert isinstance(w, dict)
assert set(w.keys()) == set(ef.tickers)
np.testing.assert_almost_equal(ef.weights.sum(), 1)
assert all([i >= 0 for i in w.values()])
np.testing.assert_allclose(
ef.portfolio_performance(),
(0.33035037367760506, 0.21671276571944567, 1.4320816434015786),
)
def test_max_sharpe_long_weight_bounds():
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(0.03, 0.13)
)
ef.max_sharpe()
np.testing.assert_almost_equal(ef.weights.sum(), 1)
assert ef.weights.min() >= 0.03
assert ef.weights.max() <= 0.13
bounds = [(0.01, 0.13), (0.02, 0.11)] * 10
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=bounds
)
ef.max_sharpe()
assert (0.01 <= ef.weights[::2]).all() and (ef.weights[::2] <= 0.13).all()
assert (0.02 <= ef.weights[1::2]).all() and (ef.weights[1::2] <= 0.11).all()
def test_max_sharpe_explicit_bound():
ef = setup_efficient_frontier()
ef.add_constraint(lambda w: w[0] >= 0.2)
ef.add_constraint(lambda w: w[2] == 0.15)
ef.add_constraint(lambda w: w[3] + w[4] <= 0.10)
ef.max_sharpe()
np.testing.assert_almost_equal(ef.weights.sum(), 1)
assert ef.weights[0] >= 0.2 - 1e-5
np.testing.assert_almost_equal(ef.weights[2], 0.15)
assert ef.weights[3] + ef.weights[4] <= 0.10 + 1e-5
def test_max_sharpe_short():
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(None, None)
)
w = ef.max_sharpe()
assert isinstance(w, dict)
assert set(w.keys()) == set(ef.tickers)
np.testing.assert_almost_equal(ef.weights.sum(), 1)
np.testing.assert_allclose(
ef.portfolio_performance(),
(0.4072439477276246, 0.24823487545231313, 1.5599900981762558),
)
sharpe = ef.portfolio_performance()[2]
ef_long_only = setup_efficient_frontier()
ef_long_only.max_sharpe()
long_only_sharpe = ef_long_only.portfolio_performance()[2]
assert sharpe > long_only_sharpe
def test_max_sharpe_L2_reg():
ef = setup_efficient_frontier()
ef.add_objective(objective_functions.L2_reg, gamma=5)
with warnings.catch_warnings(record=True) as w:
weights = ef.max_sharpe()
assert len(w) == 1
assert isinstance(weights, dict)
assert set(weights.keys()) == set(ef.tickers)
np.testing.assert_almost_equal(ef.weights.sum(), 1)
assert all([i >= 0 for i in weights.values()])
np.testing.assert_allclose(
ef.portfolio_performance(),
(0.2936875354933478, 0.22783545277575057, 1.2012508683744123),
)
ef2 = setup_efficient_frontier()
ef2.max_sharpe()
# L2_reg should pull close to equal weight
equal_weight = np.full((ef.n_assets,), 1 / ef.n_assets)
assert (
np.abs(equal_weight - ef.weights).sum()
< np.abs(equal_weight - ef2.weights).sum()
)
def test_max_sharpe_L2_reg_many_values():
warnings.filterwarnings("ignore")
ef = setup_efficient_frontier()
ef.max_sharpe()
# Count the number of weights that are more than 1%
initial_number = sum(ef.weights > 0.01)
for i in range(1, 20, 2):
ef = setup_efficient_frontier()
ef.add_objective(objective_functions.L2_reg, gamma=0.05 * i)
ef.max_sharpe()
np.testing.assert_almost_equal(ef.weights.sum(), 1)
new_number = sum(ef.weights > 0.01)
# Higher gamma should reduce the number of small weights
assert new_number >= initial_number
initial_number = new_number
def test_max_sharpe_L2_reg_different_gamma():
ef = setup_efficient_frontier()
ef.add_objective(objective_functions.L2_reg, gamma=1)
ef.max_sharpe()
ef2 = setup_efficient_frontier()
ef2.add_objective(objective_functions.L2_reg, gamma=0.01)
ef2.max_sharpe()
# Higher gamma should pull close to equal weight
equal_weight = np.array([1 / ef.n_assets] * ef.n_assets)
assert (
np.abs(equal_weight - ef.weights).sum()
< np.abs(equal_weight - ef2.weights).sum()
)
def test_max_sharpe_L2_reg_reduces_sharpe():
# L2 reg should reduce the number of small weights at the cost of Sharpe
ef_no_reg = setup_efficient_frontier()
ef_no_reg.max_sharpe()
sharpe_no_reg = ef_no_reg.portfolio_performance()[2]
ef = setup_efficient_frontier()
ef.add_objective(objective_functions.L2_reg, gamma=2)
ef.max_sharpe()
sharpe = ef.portfolio_performance()[2]
assert sharpe < sharpe_no_reg
def test_max_sharpe_L2_reg_with_shorts():
ef_no_reg = setup_efficient_frontier()
ef_no_reg.max_sharpe()
initial_number = sum(ef_no_reg.weights > 0.01)
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(None, None)
)
ef.add_objective(objective_functions.L2_reg)
w = ef.max_sharpe()
assert isinstance(w, dict)
assert set(w.keys()) == set(ef.tickers)
np.testing.assert_almost_equal(ef.weights.sum(), 1)
np.testing.assert_allclose(
ef.portfolio_performance(),
(0.3076093180094401, 0.22415982749409985, 1.2830546901496447),
)
new_number = sum(ef.weights > 0.01)
assert new_number >= initial_number
def test_max_sharpe_risk_free_rate():
ef = setup_efficient_frontier()
ef.max_sharpe()
_, _, initial_sharpe = ef.portfolio_performance()
ef = setup_efficient_frontier()
ef.max_sharpe(risk_free_rate=0.10)
_, _, new_sharpe = ef.portfolio_performance(risk_free_rate=0.10)
assert new_sharpe <= initial_sharpe
ef = setup_efficient_frontier()
ef.max_sharpe(risk_free_rate=0)
_, _, new_sharpe = ef.portfolio_performance(risk_free_rate=0)
assert new_sharpe >= initial_sharpe
def test_min_vol_pair_constraint():
ef = setup_efficient_frontier()
ef.min_volatility()
old_sum = ef.weights[:2].sum()
ef = setup_efficient_frontier()
ef.add_constraint(lambda w: (w[1] + w[0] <= old_sum / 2))
ef.min_volatility()
new_sum = ef.weights[:2].sum()
assert new_sum <= old_sum / 2 + 1e-4
def test_max_sharpe_pair_constraint():
ef = setup_efficient_frontier()
ef.max_sharpe()
old_sum = ef.weights[:2].sum()
ef = setup_efficient_frontier()
ef.add_constraint(lambda w: (w[1] + w[0] <= old_sum / 2))
ef.max_sharpe()
new_sum = ef.weights[:2].sum()
assert new_sum <= old_sum / 2 + 1e-4
def test_max_sharpe_sector_constraints_manual():
sector_mapper = {
"GOOG": "tech",
"AAPL": "tech",
"FB": "tech",
"AMZN": "tech",
"BABA": "tech",
"GE": "utility",
"AMD": "tech",
"WMT": "retail",
"BAC": "fig",
"GM": "auto",
"T": "auto",
"UAA": "airline",
"SHLD": "retail",
"XOM": "energy",
"RRC": "energy",
"BBY": "retail",
"MA": "fig",
"PFE": "pharma",
"JPM": "fig",
"SBUX": "retail",
}
sector_upper = {
"tech": 0.2,
"utility": 0.1,
"retail": 0.2,
"fig": 0.4,
"airline": 0.05,
"energy": 0.2,
}
sector_lower = {"utility": 0.01, "fig": 0.02, "airline": 0.01}
ef = setup_efficient_frontier()
for sector in sector_upper:
is_sector = [sector_mapper[t] == sector for t in ef.tickers]
ef.add_constraint(lambda w: cp.sum(w[is_sector]) <= sector_upper[sector])
for sector in sector_lower:
is_sector = [sector_mapper[t] == sector for t in ef.tickers]
ef.add_constraint(lambda w: cp.sum(w[is_sector]) >= sector_lower[sector])
weights = ef.max_sharpe()
for sector in list(set().union(sector_upper, sector_lower)):
sector_sum = 0
for t, v in weights.items():
if sector_mapper[t] == sector:
sector_sum += v
assert sector_sum <= sector_upper.get(sector, 1) + 1e-5
assert sector_sum >= sector_lower.get(sector, 0) - 1e-5
def test_max_sharpe_sector_constraints_auto():
sector_mapper = {
"GOOG": "tech",
"AAPL": "tech",
"FB": "tech",
"AMZN": "tech",
"BABA": "tech",
"GE": "utility",
"AMD": "tech",
"WMT": "retail",
"BAC": "fig",
"GM": "auto",
"T": "auto",
"UAA": "airline",
"SHLD": "retail",
"XOM": "energy",
"RRC": "energy",
"BBY": "retail",
"MA": "fig",
"PFE": "pharma",
"JPM": "fig",
"SBUX": "retail",
}
sector_upper = {
"tech": 0.2,
"utility": 0.1,
"retail": 0.2,
"fig": 0.4,
"airline": 0.05,
"energy": 0.2,
}
sector_lower = {"utility": 0.01, "fig": 0.02, "airline": 0.01}
ef = setup_efficient_frontier()
ef.add_sector_constraints(sector_mapper, sector_lower, sector_upper)
weights = ef.max_sharpe()
for sector in list(set().union(sector_upper, sector_lower)):
sector_sum = 0
for t, v in weights.items():
if sector_mapper[t] == sector:
sector_sum += v
assert sector_sum <= sector_upper.get(sector, 1) + 1e-5
assert sector_sum >= sector_lower.get(sector, 0) - 1e-5
def test_efficient_risk_sector_constraints_manual():
sector_mapper = {
"GOOG": "tech",
"AAPL": "tech",
"FB": "tech",
"AMZN": "tech",
"BABA": "tech",
"GE": "utility",
"AMD": "tech",
"WMT": "retail",
"BAC": "fig",
"GM": "auto",
"T": "auto",
"UAA": "airline",
"SHLD": "retail",
"XOM": "energy",
"RRC": "energy",
"BBY": "retail",
"MA": "fig",
"PFE": "pharma",
"JPM": "fig",
"SBUX": "retail",
}
sector_upper = {
"tech": 0.2,
"utility": 0.1,
"retail": 0.2,
"fig": 0.4,
"airline": 0.05,
"energy": 0.2,
}
sector_lower = {"utility": 0.01, "fig": 0.02, "airline": 0.01}
ef = setup_efficient_frontier()
for sector in sector_upper:
is_sector = [sector_mapper[t] == sector for t in ef.tickers]
ef.add_constraint(lambda w: cp.sum(w[is_sector]) <= sector_upper[sector])
for sector in sector_lower:
is_sector = [sector_mapper[t] == sector for t in ef.tickers]
ef.add_constraint(lambda w: cp.sum(w[is_sector]) >= sector_lower[sector])
weights = ef.efficient_risk(0.19)
for sector in list(set().union(sector_upper, sector_lower)):
sector_sum = 0
for t, v in weights.items():
if sector_mapper[t] == sector:
sector_sum += v
assert sector_sum <= sector_upper.get(sector, 1) + 1e-5
assert sector_sum >= sector_lower.get(sector, 0) - 1e-5
def test_efficient_risk_sector_constraints_auto():
sector_mapper = {
"GOOG": "tech",
"AAPL": "tech",
"FB": "tech",
"AMZN": "tech",
"BABA": "tech",
"GE": "utility",
"AMD": "tech",
"WMT": "retail",
"BAC": "fig",
"GM": "auto",
"T": "auto",
"UAA": "airline",
"SHLD": "retail",
"XOM": "energy",
"RRC": "energy",
"BBY": "retail",
"MA": "fig",
"PFE": "pharma",
"JPM": "fig",
"SBUX": "retail",
}
sector_upper = {
"tech": 0.2,
"utility": 0.1,
"retail": 0.2,
"fig": 0.4,
"airline": 0.05,
"energy": 0.2,
}
sector_lower = {"utility": 0.01, "fig": 0.02, "airline": 0.01}
ef = setup_efficient_frontier()
ef.add_sector_constraints(sector_mapper, sector_lower, sector_upper)
weights = ef.efficient_risk(0.19)
for sector in list(set().union(sector_upper, sector_lower)):
sector_sum = 0
for t, v in weights.items():
if sector_mapper[t] == sector:
sector_sum += v
assert sector_sum <= sector_upper.get(sector, 1) + 1e-5
assert sector_sum >= sector_lower.get(sector, 0) - 1e-5
def test_max_quadratic_utility():
ef = setup_efficient_frontier()
w = ef.max_quadratic_utility(risk_aversion=2)
assert isinstance(w, dict)
assert set(w.keys()) == set(ef.tickers)
np.testing.assert_almost_equal(ef.weights.sum(), 1)
np.testing.assert_allclose(
ef.portfolio_performance(),
(0.40064324249527605, 0.2917825266124642, 1.3045443362029479),
)
ret1, var1, _ = ef.portfolio_performance()
# increasing risk_aversion should lower both vol and return
ef.max_quadratic_utility(10)
ret2, var2, _ = ef.portfolio_performance()
assert ret2 < ret1 and var2 < var1
def test_max_quadratic_utility_with_shorts():
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(-1, 1)
)
ef.max_quadratic_utility()
np.testing.assert_almost_equal(ef.weights.sum(), 1)
np.testing.assert_allclose(
ef.portfolio_performance(),
(1.3318330413711252, 1.0198436183533854, 1.2863080356272452),
)
def test_max_quadratic_utility_market_neutral():
ef = EfficientFrontier(
*setup_efficient_frontier(data_only=True), weight_bounds=(-1, 1)
)
ef.max_quadratic_utility(market_neutral=True)
np.testing.assert_almost_equal(ef.weights.sum(), 0)
np.testing.assert_allclose(
ef.portfolio_performance(),
(1.13434841843883, 0.9896404148973286, 1.1260134506071473),
)
def test_max_quadratic_utility_limit():
# in limit of large risk_aversion, this should approach min variance.
ef = setup_efficient_frontier()
ef.max_quadratic_utility(risk_aversion=1e10)
ef2 = setup_efficient_frontier()
ef2.min_volatility()
np.testing.assert_array_almost_equal(ef.weights, ef2.weights)
def test_max_quadratic_utility_L2_reg():
ef = setup_efficient_frontier()
ef.add_objective(objective_functions.L2_reg, gamma=5)
weights = ef.max_quadratic_utility()
assert isinstance(weights, dict)
assert set(weights.keys()) == set(ef.tickers)
np.testing.assert_almost_equal(ef.weights.sum(), 1)
assert all([i >= 0 for i in weights.values()])
np.testing.assert_allclose(
ef.portfolio_performance(),
(0.2602803268728476, 0.21603540587515674, 1.112226608872166),
)
ef2 = setup_efficient_frontier()
ef2.max_quadratic_utility()
# L2_reg should pull close to equal weight
equal_weight = np.full((ef.n_assets,), 1 / ef.n_assets)
assert (
np.abs(equal_weight - ef.weights).sum()
< np.abs(equal_weight - ef2.weights).sum()
)
"""
Plotting function
"""
from create_geometry import BladeGeometry
from lifting_line_solver import LiftingLineSolver
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from read_BEMdata_into_Python import read_matlab_data
import numpy as np
phase_shifts = np.linspace(0, 180, 4)
colors = ["lawngreen", "deepskyblue", "orangered", "darkviolet"]
# start plots
fig_rotor, ax_rotor = plt.subplots(1, 1, dpi=150)
nspan = 20
ntheta = 200
nblades = 3
spacing = 'equal'
nrotor = 2
first_blades_right = []
first_blade_left = []
for color, phase_shift in zip(colors, phase_shifts):
prop_geo = BladeGeometry(radius=50.0, tsr=8, v_inf=10.0, n_blades=3, n_span=nspan, phase_diff=phase_shift,
double_rotor=True,
n_theta=ntheta, spacing=spacing, a=0.33, xshift=0, yshift=100, zshift=0)
blade = prop_geo.bladepanels
theta = np.linspace(0, 2 * np.pi)
import json
import pytest
from lxml import etree
import numpy as np
import xarray as xr
import pandas as pd
import finch
import finch.processes
from finch.processes.wps_xclim_indices import XclimIndicatorBase
from finch.processes.wps_base import make_xclim_indicator_process
from . utils import execute_process, wps_input_file, wps_literal_input
from pathlib import Path
from pywps.app.exceptions import ProcessError
from pywps import configuration
from unittest import mock
from numpy.testing import assert_equal
from xclim.testing import open_dataset
K2C = 273.16
configuration.CONFIG['finch:metadata']['testing_session'] = "True"
def _get_output_standard_name(process_identifier):
for p in finch.processes.get_processes():
if p.identifier == process_identifier:
return p.xci.standard_name
@pytest.mark.parametrize("indicator", finch.processes.indicators)
def test_indicators_processes_discovery(indicator):
process = make_xclim_indicator_process(indicator, "Process", XclimIndicatorBase)
assert indicator.identifier == process.identifier
# Remove args not supported by finch: we remove special kinds,
# 50 is "kwargs". 70 is Dataset ('ds') and 99 is "unknown". All normal types are 0-9.
parameters = set([k for k, v in indicator.parameters.items() if v['kind'] < 50 or k == 'indexer'])
parameters.add("check_missing")
parameters.add("missing_options")
parameters.add("cf_compliance")
parameters.add("data_validation")
parameters.add("variable")
if "indexer" in parameters:
parameters.remove("indexer")
parameters.add("month")
parameters.add("season")
assert_equal(parameters, set(i.identifier for i in process.inputs), indicator.identifier)
# TODO : Extend test coverage
def test_processes(client, netcdf_datasets):
"""Run a dummy calculation for every process, keeping some default parameters."""
# indicators = finch.processes.indicators
processes = filter(lambda x: isinstance(x, XclimIndicatorBase), finch.processes.xclim.__dict__.values())
literal_inputs = {
"freq": "MS",
"window": "3",
"mid_date": "07-01",
"before_date": "07-01",
}
keep_defaults = ["thresh", "thresh_tasmin", "thresh_tasmax"]
attrs = xr.open_dataset(list(netcdf_datasets.values())[0], decode_times=False).attrs
for process in processes:
inputs = []
for process_input in process.inputs:
name = process_input.identifier
if name in netcdf_datasets.keys():
inputs.append(wps_input_file(name, netcdf_datasets[name]))
elif name in literal_inputs.keys():
inputs.append(wps_literal_input(name, literal_inputs[name]))
elif name in keep_defaults:
pass
else:
raise NotImplementedError
outputs = execute_process(client, process.identifier, inputs)
ds = xr.open_dataset(outputs[0])
output_variable = list(ds.data_vars)[0]
assert getattr(ds, output_variable).standard_name == process.xci.standard_name
assert ds.attrs['testing_session']
model = attrs["driving_model_id"]
experiment = attrs["driving_experiment_id"].replace(",", "+")
ensemble = (
f"r{attrs['driving_realization']}"
f"i{attrs['driving_initialization_method']}"
f"p{attrs['driving_physics_version']}"
)
date_start = pd.to_datetime(str(ds.time[0].values))
date_end = pd.to_datetime(str(ds.time[-1].values))
expected = (
f"{output_variable.replace('_', '-')}_"
f"{model}_{experiment}_{ensemble}_"
f"{date_start:%Y%m%d}-{date_end:%Y%m%d}.nc"
)
assert Path(outputs[0]).name == expected
def test_wps_daily_temperature_range_multiple(client, netcdf_datasets):
identifier = "dtr"
inputs = [wps_literal_input("freq", "YS")]
for _ in range(5):
inputs.append(wps_input_file("tasmax", netcdf_datasets["tasmax"]))
inputs.append(wps_input_file("tasmin", netcdf_datasets["tasmin"]))
with mock.patch(
"finch.processes.wps_xclim_indices.FinchProgressBar"
) as mock_progress:
outputs = execute_process(
client, identifier, inputs, output_names=["output_netcdf", "ref"]
)
assert mock_progress.call_args_list[0][1]["start_percentage"] == 0
assert mock_progress.call_args_list[0][1]["end_percentage"] == 20
assert mock_progress.call_args_list[4][1]["start_percentage"] == 80
assert mock_progress.call_args_list[4][1]["end_percentage"] == 100
et = etree.fromstring(outputs[1].data[0].encode())
urls = [e[2].text for e in et if e.tag.endswith("file")]
assert len(urls) == 5, "Containing 5 files"
assert len(set(urls)) == 5, "With different links"
assert urls[1].endswith("-1.nc")
def test_wps_daily_temperature_range_multiple_not_same_length(client, netcdf_datasets):
identifier = "dtr"
inputs = [wps_literal_input("freq", "YS")]
for _ in range(5):
inputs.append(wps_input_file("tasmax", netcdf_datasets["tasmax"]))
inputs.append(wps_input_file("tasmin", netcdf_datasets["tasmin"]))
inputs.pop()
with pytest.raises(ProcessError, match="must be equal"):
execute_process(
client, identifier, inputs, output_names=["output_netcdf", "ref"]
)
def test_heat_wave_frequency_window_thresh_parameters(client, netcdf_datasets):
identifier = "heat_wave_frequency"
inputs = [
wps_input_file("tasmax", netcdf_datasets["tasmax"]),
wps_input_file("tasmin", netcdf_datasets["tasmin"]),
wps_literal_input("window", "3"),
wps_literal_input("freq", "YS"),
wps_literal_input("thresh_tasmin", "20 degC"),
wps_literal_input("thresh_tasmax", "25 degC"),
]
outputs = execute_process(client, identifier, inputs)
ds = xr.open_dataset(outputs[0])
assert ds.attrs["frequency"] == "yr"
assert ds.heat_wave_frequency.standard_name == _get_output_standard_name(identifier)
def test_heat_wave_index_thresh_parameter(client, netcdf_datasets):
identifier = "heat_wave_index"
inputs = [
wps_input_file("tasmax", netcdf_datasets["tasmax"]),
wps_literal_input("thresh", "30 degC"),
]
outputs = execute_process(client, identifier, inputs)
ds = xr.open_dataset(outputs[0])
assert ds["heat_wave_index"].standard_name == _get_output_standard_name(identifier)
def test_missing_options(client, netcdf_datasets):
identifier = "tg_mean"
inputs = [
wps_input_file("tas", netcdf_datasets["tas_missing"]),
wps_literal_input("freq", "YS"),
]
outputs = execute_process(client, identifier, inputs)
ds = xr.open_dataset(outputs[0])
np.testing.assert_array_equal(ds.tg_mean.isnull(), True)
inputs = [
wps_input_file("tas", netcdf_datasets["tas_missing"]),
wps_literal_input("freq", "YS"),
wps_literal_input("check_missing", "pct"),
wps_literal_input("missing_options", json.dumps({"pct": {"tolerance": 0.1}}))
]
outputs = execute_process(client, identifier, inputs)
ds = xr.open_dataset(outputs[0])
np.testing.assert_array_equal(ds.tg_mean.isnull(), False)
def test_stats_process(client, netcdf_datasets):
"""Test stats and the capacity to choose the variable."""
identifier = "stats"
inputs = [
wps_input_file("da", netcdf_datasets["pr_discharge"]),
wps_literal_input("freq", "YS"),
wps_literal_input("op", "max"),
wps_literal_input("season", "JJA"),
wps_literal_input("variable", "discharge")
]
outputs = execute_process(client, identifier, inputs)
ds = xr.open_dataset(outputs[0])
np.testing.assert_array_equal(ds.qsummermax.isnull(), False)
def test_freqanalysis_process(client, netcdf_datasets):
identifier = "freq_analysis"
inputs = [
wps_input_file("da", netcdf_datasets["discharge"]),
wps_literal_input("t", "2"),
wps_literal_input("t", "50"),
wps_literal_input("freq", "YS"),
wps_literal_input("mode", "max"),
wps_literal_input("season", "JJA"),
wps_literal_input("dist", "gumbel_r"),
wps_literal_input("variable", "discharge")
]
outputs = execute_process(client, identifier, inputs)
ds = xr.open_dataset(outputs[0])
np.testing.assert_array_equal(ds.q1maxsummer.shape, (2, 5, 6))
class TestFitProcess:
identifier = "fit"
def test_simple(self, client, netcdf_datasets):
inputs = [
wps_input_file("da", netcdf_datasets["discharge"]),
wps_literal_input("dist", "norm"),
]
outputs = execute_process(client, self.identifier, inputs)
ds = xr.open_dataset(outputs[0])
np.testing.assert_array_equal(ds.params.shape, (2, 5, 6))
def test_nan(self, client, q_series, tmp_path):
q_series([333, 145, 203, 109, 430, 230, np.nan]).to_netcdf(tmp_path / "q.nc")
inputs = [
wps_input_file("da", tmp_path / "q.nc"),
wps_literal_input("dist", "norm"),
]
outputs = execute_process(client, self.identifier, inputs)
ds = xr.open_dataset(outputs[0])
np.testing.assert_array_equal(ds.params.isnull(), False)
def test_rain_approximation(client, pr_series, tas_series, tmp_path):
identifier = "prlp"
pr_series(np.ones(10)).to_netcdf(tmp_path / 'pr.nc')
tas_series(np.arange(10)).to_netcdf(tmp_path / 'tas.nc')
import copy
import gc
import glob
import os
import warnings
import aplpy
import linetools.utils as ltu
import numpy as np
import numpy.ma as ma
import pyregion
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.io.ascii.sextractor import SExtractor
from astropy.modeling import models, fitting
from astropy.table import Table
from astropy.utils import isiterable
from linetools.spectra.xspectrum1d import XSpectrum1D
from linetools.utils import name_from_coord
from matplotlib import pyplot as plt
from scipy import interpolate
from scipy import ndimage
import PyMUSE.utils as mcu
class MuseCube:
"""
Class to handle VLT/MUSE data
"""
def __init__(self, filename_cube, filename_white=None, pixelsize=0.2 * u.arcsec, n_fig=1,
flux_units=1E-20 * u.erg / u.s / u.cm ** 2 / u.angstrom, vmin=None, vmax=None, wave_cal='air'):
"""
Parameters
----------
filename_cube: string
Name of the MUSE datacube .fits file
filename_white: string
Name of the MUSE white image .fits file
pixelsize : float or Quantity, optional
Pixel size of the datacube, if float it assumes arcsecs.
Default is 0.2 arcsec
n_fig : int, optional
Number of the matplotlib figure used for the white-image canvas.
Default is 1.
flux_units : Quantity, optional
Flux units of the datacube.
Default is 1E-20 erg / s / cm^2 / Angstrom.
"""
# init
self.color = False
self.cmap = ""
self.flux_units = flux_units
self.n = n_fig
plt.close(self.n)
self.wave_cal = wave_cal
self.filename = filename_cube
self.filename_white = filename_white
self.load_data()
self.white_data = fits.open(self.filename_white)[1].data
self.hdulist_white = fits.open(self.filename_white)
self.white_data = np.where(self.white_data < 0, 0, self.white_data)
if not vmin:
self.vmin=np.nanpercentile(self.white_data,0.25)
else:
self.vmin = vmin
if not vmax:
self.vmax=np.nanpercentile(self.white_data,98.)
else:
self.vmax = vmax
self.gc2 = aplpy.FITSFigure(self.filename_white, figure=plt.figure(self.n))
self.gc2.show_grayscale(vmin=self.vmin, vmax=self.vmax)
# self.gc = aplpy.FITSFigure(self.filename, slices=[1], figure=plt.figure(20))
self.pixelsize = pixelsize
gc.enable()
# plt.close(20)
print("MuseCube: Ready!")
def load_data(self):
hdulist = fits.open(self.filename)
print("MuseCube: Loading the cube fluxes and variances...")
# import pdb; pdb.set_trace()
self.cube = ma.MaskedArray(hdulist[1].data)
self.stat = ma.MaskedArray(hdulist[2].data)
print("MuseCube: Defining master masks (this may take a while but it is for the greater good).")
# masking
self.mask_init = np.isnan(self.cube) | np.isnan(self.stat)
self.cube.mask = self.mask_init
self.stat.mask = self.mask_init
# for ivar weighting ; consider creating it in init ; takes long
# self.flux_over_ivar = self.cube / self.stat
        self.header_1 = hdulist[1].header  # The header is needed to create a proper copy of the white image.
self.header_0 = hdulist[0].header
if self.filename_white is None:
print("MuseCube: No white image given, creating one.")
w_data = copy.deepcopy(self.create_white(save=False).data)
w_header_0 = copy.deepcopy(self.header_0)
w_header_1 = copy.deepcopy(self.header_1)
            # These loops remove the third dimension from the header's keywords. This is necessary in order to
            # create the white image and preserve the cube astrometry.
for i in w_header_0.keys():
if '3' in i:
del w_header_0[i]
for i in w_header_1.keys():
if '3' in i:
del w_header_1[i]
# prepare the header
hdu = fits.HDUList()
hdu_0 = fits.PrimaryHDU(header=w_header_0)
hdu_1 = fits.ImageHDU(data=w_data, header=w_header_1)
hdu.append(hdu_0)
hdu.append(hdu_1)
            hdu.writeto('new_white.fits', overwrite=True)
self.filename_white = 'new_white.fits'
print("MuseCube: `new_white.fits` image saved to disk.")
def color_gui(self, cmap):
"""
Function to change the cmap of the canvas
:param cmap: string. matplotlib's color map. cmap = 'none' to gray scale again
:return:
"""
if cmap == 'none':
self.color = False
self.cmap = ""
else:
self.color = True
self.cmap = cmap
self.reload_canvas()
def get_smoothed_white(self, npix=2, save=True, show=False, **kwargs):
"""Gets an smoothed version (Gaussian of sig=npix)
of the white image. If save is True, it writes a file
to disk called `smoothed_white.fits`.
**kwargs are passed down to scipy.ndimage.gaussian_filter()
"""
hdulist = self.hdulist_white
im = self.white_data
if npix > 0:
smooth_im = ndimage.gaussian_filter(im, sigma=npix, **kwargs)
else:
smooth_im = im
if save:
hdulist[1].data = smooth_im
prihdr = hdulist[0].header
comment = 'Spatially smoothed with a Gaussian kernel of sigma={} spaxels (by MuseCube)'.format(npix)
# print(comment)
prihdr['history'] = comment
            hdulist.writeto('smoothed_white.fits', overwrite=True)
if show:
fig = aplpy.FITSFigure('smoothed_white.fits', figure=plt.figure())
fig.show_grayscale(vmin=self.vmin,vmax=self.vmax)
return smooth_im
def spec_to_vacuum(self, spectrum):
spectrum_vac = spectrum
if self.wave_cal == 'air':
spectrum_vac.meta['airvac'] = 'air'
spectrum_vac.airtovac()
return spectrum_vac
else:
return spectrum_vac
def spatial_smooth(self, npix, output="smoothed.fits", test=False, **kwargs):
"""Applies Gaussian filter of std=npix in both spatial directions
and writes it to disk as a new MUSE Cube.
Notes: the STAT cube is not touched.
Parameters
----------
npix : int
Std of Gaussian kernel in spaxel units.
output : str, optional
Name of the output file
test : bool, optional
Whether to check for flux being conserved
**kwargs are passed down to scipy.ndimage.gaussian_filter()
Return
------
Writes a new file to disk.
"""
if not isinstance(npix, int):
raise ValueError("npix must be integer.")
cube_new = copy.deepcopy(self.cube)
ntot = len(self.cube)
for wv_ii in range(ntot):
print('{}/{}'.format(wv_ii + 1, ntot))
image_aux = self.cube[wv_ii, :, :]
smooth_ii = ma.MaskedArray(ndimage.gaussian_filter(image_aux, sigma=npix, **kwargs))
smooth_ii.mask = image_aux.mask | np.isnan(smooth_ii)
# test the fluxes are conserved
if test:
gd_pix = ~smooth_ii.mask
try:
med_1 = np.nansum(smooth_ii[gd_pix])
med_2 = np.nansum(image_aux[gd_pix])
print(med_1, med_2, (med_1 - med_2) / med_1)
                    np.testing.assert_allclose(med_1, med_2, rtol=1e-4)
except AssertionError:
import pdb
pdb.set_trace()
cube_new[wv_ii, :, :] = smooth_ii
# import pdb; pdb.set_trace()
hdulist = fits.open(self.filename)
hdulist[1].data = cube_new.data
prihdr = hdulist[0].header
comment = 'Spatially smoothed with a Gaussian kernel of sigma={} spaxels (by MuseCube)'.format(npix)
print(comment)
prihdr['history'] = comment
        hdulist.writeto(output, overwrite=True)
print("MuseCube: new smoothed cube written to {}".format(output))
def get_mini_image(self, center, halfsize=15):
"""
:param center: tuple of coordinates, in pixels
:param size: length of the square around center
:return: ndarray which contain the image
"""
side = 2 * halfsize + 1
image = [[0 for x in range(side)] for y in range(side)]
data_white = fits.open(self.filename_white)[1].data
center_x = center[0]
center_y = center[1]
        for i in range(center_x - halfsize - 1, center_x + halfsize):
            for j in range(center_y - halfsize - 1, center_y + halfsize):
i2 = i - (center_x - halfsize)
j2 = j - (center_y - halfsize)
image[j2][i2] = data_white[j - 1][i - 1]
return image
def get_gaussian_seeing_weighted_spec(self, x_c, y_c, radius, seeing=4):
"""
Function to extract the spectrum of a circular aperture defined by x_c, y_c and radius in spaxel space.
The spectrum is weighted by a 2d gaussian centered at the center of the aperture, with a std = seeing in spaxels
:param x_c: x coordinate of the center of the aperture (spaxel)
        :param y_c: y coordinate of the center of the aperture (spaxel)
:param radius: radius of the circular aperture
:param seeing: standard deviation of the gaussian in spaxels
:return: XSpectrum1D object
"""
import scipy.ndimage.filters as fi
new_3dmask = self.get_mini_cube_mask_from_ellipse_params(x_c, y_c, radius)
w = self.wavelength
n = len(w)
fl = np.zeros(n)
sig = np.zeros(n)
self.cube.mask = new_3dmask
for wv_ii in range(n):
mask = new_3dmask[wv_ii]
            center = np.zeros(mask.shape)  # For some reason this fails if the assignment is changed to np.zeros_like(mask)
            center[y_c][x_c] = 1
            weights = ma.MaskedArray(fi.gaussian_filter(center, seeing))
            weights.mask = mask
            weights = weights / np.sum(weights)
            fl[wv_ii] = np.sum(self.cube[wv_ii] * weights)
            sig[wv_ii] = np.sqrt(np.sum(self.stat[wv_ii] * (weights ** 2)))
self.cube.mask = self.mask_init
return XSpectrum1D.from_tuple((w, fl, sig))
def get_spec_spaxel(self, x, y, coord_system='pix', n_figure=2, empirical_std=False, save=False):
"""
Gets the spectrum of a single spaxel (xy) of the MuseCube
:param x: x coordinate of the spaxel
:param y: y coordinate of the spaxel
:param coord_system: 'pix' or 'wcs'
:return: spec: XSpectrum1D object
"""
if coord_system == 'wcs':
x_c, y_c = self.w2p(x, y)
x_world, y_world = x, y
else:
x_c, y_c = x, y
x_world, y_world = self.p2w(x, y)
region_string = self.ellipse_param_to_ds9reg_string(x_c, y_c, 1, 1, 0, coord_system='pix')
self.draw_pyregion(region_string)
w = self.wavelength
n = len(w)
spec = np.zeros(n)
sigma = np.zeros(n)
for wv_ii in range(n):
spec[wv_ii] = self.cube.data[wv_ii][int(y_c)][int(x_c)]
sigma[wv_ii] = np.sqrt(self.stat.data[wv_ii][int(y_c)][int(x_c)])
spec = XSpectrum1D.from_tuple((self.wavelength, spec, sigma))
if empirical_std:
spec = mcu.calculate_empirical_rms(spec)
spec = self.spec_to_vacuum(spec)
plt.figure(n_figure)
plt.plot(spec.wavelength, spec.flux)
coords = SkyCoord(ra=x_world, dec=y_world, frame='icrs', unit='deg')
name = name_from_coord(coords)
plt.title(name)
plt.xlabel('Angstroms')
plt.ylabel('Flux (' + str(self.flux_units) + ')')
if save:
spec.write_to_fits(name + '.fits')
return spec
def get_spec_from_ellipse_params(self, x_c, y_c, params, coord_system='pix', mode='wwm', npix=0, frac=0.1,
n_figure=2, empirical_std=False, save=False, color='green'):
"""
Obtains a combined spectrum of spaxels within a geometrical region defined by
        x_c, y_c, params
:param x_c: x coordinate of the center of the ellipse
:param y_c: y coordinate of the center of the ellipse
:param params: Either a float that will be interpreted as a radius, or an iterable [a,b,theta] with the ellipse parameters
:param coord_system: str. Default = 'pix'.
If coord_system = 'wcs' the coordinates will be considered as degrees
        :param mode: str
            Mode for combining spaxels:
              * `ivar` - Inverse variance weighting; the variance is taken only spatially, from a "white variance image"
              * `sum` - Sum of the total flux
              * `gaussian` - Weighted mean. Weights are obtained from a 2D gaussian fit of the bright profile
              * `wwm` - 'White Weighted Mean'. Weighted mean, where weights are obtained from the white image,
                smoothed using a gaussian filter of sigma = npix. If npix=0, no smoothing is done.
              * `ivarwv` - Weighted mean, where the weight of every pixel is given by the inverse of its variance
              * `mean` - Mean of the total flux
              * `median` - Median of the total flux
              * `wwm_ivarwv` - Weights given by both `ivarwv` and `wwm`
              * `wwm_ivar` - Weights given by both `wwm` and `ivar`
              * `wfrac` - Only takes the fraction `frac` of the brightest spaxels (white) in the region
                (e.g. frac=0.1 means the 10% brightest), with equal weights.
        :param frac: Float, default = 0.1
            Parameter needed for the wfrac mode
        :param npix: int. Default = 0
            Standard deviation of the gaussian filter used to smooth (only in wwm methods)
:param n_figure: int. Default = 2. Figure to display the spectrum
:param empirical_std: boolean. Default = False.
If True, the errors of the spectrum will be determined empirically
:param save: boolean. Default = False
If True, the spectrum will be saved in hard_disk
:return: spec: XSpectrum1D object
"""
if mode == 'gaussian':
spec = self.get_gaussian_profile_weighted_spec(x_c=x_c, y_c=y_c, params=params)
else:
new_mask = self.get_mini_cube_mask_from_ellipse_params(x_c, y_c, params, coord_system=coord_system,color=color)
spec = self.spec_from_minicube_mask(new_mask, mode=mode, npix=npix, frac=frac)
if empirical_std:
spec = mcu.calculate_empirical_rms(spec)
spec = self.spec_to_vacuum(spec)
plt.figure(n_figure)
plt.plot(spec.wavelength, spec.flux)
if coord_system == 'wcs':
x_world, y_world = x_c, y_c
else:
x_world, y_world = self.p2w(x_c, y_c)
coords = SkyCoord(ra=x_world, dec=y_world, frame='icrs', unit='deg')
name = name_from_coord(coords)
plt.title(name)
plt.xlabel('Angstroms')
plt.ylabel('Flux (' + str(self.flux_units) + ')')
if save:
spec.write_to_fits(name + '.fits')
return spec
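    # Illustrative calls (coordinates and parameters are hypothetical):
    #   spec = cube.get_spec_from_ellipse_params(120, 80, 5, mode='wwm', npix=2)        # circular aperture, r = 5 spaxels
    #   spec = cube.get_spec_from_ellipse_params(120, 80, [8, 4, 30], mode='gaussian')  # ellipse with a=8, b=4, theta=30 deg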
def get_spec_from_interactive_polygon_region(self, mode='wwm', npix=0, frac=0.1,
n_figure=2,
empirical_std=False, save=False):
"""
        Function used to interactively define a region and extract the spectrum of that region.
        To use this function, the class must have been initialized in an "ipython --pylab qt" environment.
        The roipoly package is also needed. Installation instructions and LICENSE at:
        https://github.com/jdoepfert/roipoly.py/
        :param mode: str, default = wwm
            Mode for combining spaxels:
              * `ivar` - Inverse variance weighting; the variance is taken only spatially, from a "white variance image"
              * `sum` - Sum of the total flux
              * `wwm` - 'White Weighted Mean'. Weighted mean, where weights are obtained from the white image,
                smoothed using a gaussian filter of sigma = npix. If npix=0, no smoothing is done.
              * `ivarwv` - Weighted mean, where the weight of every pixel is given by the inverse of its variance
              * `mean` - Mean of the total flux
              * `median` - Median of the total flux
              * `wwm_ivarwv` - Weights given by both `ivarwv` and `wwm`
              * `wwm_ivar` - Weights given by both `wwm` and `ivar`
              * `wfrac` - Only takes the fraction `frac` of the brightest spaxels (white) in the region
                (e.g. frac=0.1 means the 10% brightest), with equal weights.
        :param frac: Float, default = 0.1
            Parameter needed for the wfrac mode
        :param npix: int. Default = 0
            Standard deviation of the gaussian filter used to smooth (only in wwm methods)
:param n_figure: int. Default = 2. Figure to display the spectrum
:param empirical_std: boolean. Default = False
If True, the errors of the spectrum will be determined empirically
:param save: boolean. Default = False
If True, the spectrum will be saved in hard_disk
:return: spec: XSpectrum1D object
"""
from roipoly import roipoly
current_fig = plt.figure(self.n)
MyROI = roipoly(roicolor='r', fig=current_fig)
raw_input("MuseCube: Please select points with left click. Right click and Enter to continue...")
print("MuseCube: Calculating the spectrum...")
mask = MyROI.getMask(self.white_data)
mask_inv = np.where(mask == 1, 0, 1)
complete_mask = self.mask_init + mask_inv
new_3dmask = np.where(complete_mask == 0, False, True)
spec = self.spec_from_minicube_mask(new_3dmask, mode=mode, npix=npix, frac=frac)
self.reload_canvas()
plt.figure(n_figure)
plt.plot(spec.wavelength, spec.flux)
plt.ylabel('Flux (' + str(self.flux_units) + ')')
plt.xlabel('Wavelength (Angstroms)')
plt.title('Polygonal region spectrum ')
plt.figure(self.n)
MyROI.displayROI()
if empirical_std:
spec = mcu.calculate_empirical_rms(spec)
spec = self.spec_to_vacuum(spec)
if save:
            spec.write_to_fits('Polygonal_region_spec.fits')
return spec
def params_from_ellipse_region_string(self, region_string, deg=False):
"""
Function to get the elliptical parameters of a region_string.
If deg is True, only will be returned the center in degrees.
Otherwise, all parameters will be returned in pixels
:param region_string: Region defined as string using ds9 format
:param deg: If True, only the center of the ellipse will be returned, in degrees.
:return: x_center,y_center,params, parameter of the ellipse defined in region_string
"""
r = pyregion.parse(region_string)
if deg:
x_c, y_c = r[0].coord_list[0], r[0].coord_list[1]
if r[0].coord_format == 'physical' or r[0].coord_format == 'image':
x_world, y_world = self.p2w(x_c - 1, y_c - 1)
else:
x_world, y_world = x_c, y_c
return x_world, y_world
else:
if r[0].coord_format == 'physical' or r[0].coord_format == 'image':
x_c, y_c, params = r[0].coord_list[0], r[0].coord_list[1], r[0].coord_list[2:5]
else:
x_world = r[0].coord_list[0]
y_world = r[0].coord_list[1]
par = r[0].coord_list[2:5]
x_c, y_c, params = self.ellipse_params_to_pixel(x_world, y_world, params=par)
return x_c - 1, y_c - 1, params
def get_spec_from_region_string(self, region_string, mode='wwm', npix=0., frac=0.1, empirical_std=False, n_figure=2,
save=False):
"""
        Obtains a combined spectrum of spaxels within a geometrical region defined by the region_string, interpreted by ds9
:param region_string: str
Region defined by a string, using ds9 format (ellipse only in gaussian method)
example: region_string = 'physical;ellipse(100,120,10,5,35) # color = green'
        :param mode: str
            Mode for combining spaxels:
              * `ivar` - Inverse variance weighting; the variance is taken only spatially, from a "white variance image"
              * `sum` - Sum of the total flux
              * `gaussian` - Weighted mean. Weights are obtained from a 2D gaussian fit of the bright profile (for elliptical regions only)
              * `wwm` - 'White Weighted Mean'. Weighted mean, where weights are obtained from the white image,
                smoothed using a gaussian filter of sigma = npix. If npix=0, no smoothing is done.
              * `ivarwv` - Weighted mean, where the weight of every pixel is given by the inverse of its variance
              * `mean` - Mean of the total flux
              * `median` - Median of the total flux
              * `wwm_ivarwv` - Weights given by both `ivarwv` and `wwm`
              * `wwm_ivar` - Weights given by both `wwm` and `ivar`
              * `wfrac` - Only takes the fraction `frac` of the brightest spaxels (white) in the region
                (e.g. frac=0.1 means the 10% brightest), with equal weights.
        :param frac: Float, default = 0.1
            Parameter needed for the wfrac mode
        :param npix: int. Default = 0
            Standard deviation of the gaussian filter used to smooth (only in wwm methods)
:param n_figure: int. Default = 2. Figure to display the spectrum
:param empirical_std: boolean. Default = False.
If True, the errors of the spectrum will be determined empirically
:param save: boolean. Default = False
If True, the spectrum will be saved in hard_disk
:return: spec: XSpectrum1D object
"""
if mode == 'gaussian':
spec = self.get_gaussian_profile_weighted_spec(region_string_=region_string)
else:
new_mask = self.get_mini_cube_mask_from_region_string(region_string)
spec = self.spec_from_minicube_mask(new_mask, mode=mode, npix=npix, frac=frac)
if empirical_std:
spec = mcu.calculate_empirical_rms(spec)
self.draw_pyregion(region_string)
spec = self.spec_to_vacuum(spec)
plt.figure(n_figure)
plt.plot(spec.wavelength, spec.flux)
x_world, y_world = self.params_from_ellipse_region_string(region_string, deg=True)
coords = SkyCoord(ra=x_world, dec=y_world, frame='icrs', unit='deg')
name = name_from_coord(coords)
plt.title(name)
plt.xlabel('Angstroms')
plt.ylabel('Flux (' + str(self.flux_units) + ')')
if save:
spec.write_to_fits(name + '.fits')
return spec
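    # Illustrative call using a DS9-format region string (the values are hypothetical):
    #   reg = 'physical;ellipse(100,120,10,5,35) # color = green'
    #   spec = cube.get_spec_from_region_string(reg, mode='wwm', npix=2)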
def draw_ellipse_params(self, xc, yc, params, color='green'):
"""
Function to draw in the interface the contour of the elliptical region defined by (xc,yc,params)
:param xc: x coordinate of the center of the ellipse
:param yc: y coordinate of the center of the ellipse
:param params: either a single radius or [a,b,theta] iterable
:param color: color to draw
:return:
"""
if isinstance(params, (float, int)):
params = [params, params, 0]
region_string = self.ellipse_param_to_ds9reg_string(xc, yc, params[0], params[1], params[2], color=color)
self.draw_pyregion(region_string)
def draw_pyregion(self, region_string):
"""
Function used to draw in the interface the contour of the region defined by region_string
:param region_string: str. Region defined by a string using ds9 format
:return: None
"""
hdulist = self.hdulist_white
r = pyregion.parse(region_string).as_imagecoord(hdulist[1].header)
fig = plt.figure(self.n)
ax = fig.axes[0]
patch_list, artist_list = r.get_mpl_patches_texts(origin=0)
patch = patch_list[0]
ax.add_patch(patch)
def spec_from_minicube_mask(self, new_3dmask, mode='wwm', npix=0, frac=0.1):
"""Given a 3D mask, this function provides a combined spectrum
of all non-masked voxels.
Parameters
----------
new_3dmask : np.array of same shape as self.cube
The 3D mask
        mode : str
            Mode for combining spaxels:
              * `ivar` - Inverse variance weighting; the variance is taken only spatially, from a "white variance image"
              * `sum` - Sum of the total flux
              * `wwm` - 'White Weighted Mean'. Weighted mean, where weights are obtained from the white image,
                smoothed using a gaussian filter of sigma = npix. If npix=0, no smoothing is done.
              * `ivarwv` - Weighted mean, where the weight of every pixel is given by the inverse of its variance
              * `mean` - Mean of the total flux
              * `median` - Median of the total flux
              * `wwm_ivarwv` - Weights given by both `ivarwv` and `wwm`
              * `wwm_ivar` - Weights given by both `wwm` and `ivar`
              * `wfrac` - Only takes the fraction `frac` of the brightest spaxels (white) in the region
                (e.g. frac=0.1 means the 10% brightest), with equal weights.
Returns
-------
An XSpectrum1D object (from linetools) with the combined spectrum.
"""
if mode not in ['ivarwv', 'ivar', 'mean', 'median', 'wwm', 'sum', 'wwm_ivarwv', 'wwm_ivar', 'wfrac']:
raise ValueError("Not ready for this type of `mode`.")
if np.shape(new_3dmask) != np.shape(self.cube.mask):
raise ValueError("new_3dmask must be of same shape as the original MUSE cube.")
n = len(self.wavelength)
fl = np.zeros(n)
er = np.zeros(n)
if mode == 'ivar':
var_white = self.create_white(stat=True, save=False)
elif mode in ['wwm', 'wwm_ivarwv', 'wwm_ivar', 'wfrac']:
smoothed_white = self.get_smoothed_white(npix=npix, save=False)
if mode == 'wwm_ivar':
var_white = self.create_white(stat=True, save=False)
elif mode == 'wfrac':
mask2d = new_3dmask[1]
self.wfrac_show_spaxels(frac=frac, mask2d=mask2d, smoothed_white=smoothed_white)
warn = False
        for wv_ii in range(n):
mask = new_3dmask[wv_ii] # 2-D mask
im_fl = self.cube[wv_ii][~mask] # this is a 1-d np.array()
im_var = self.stat[wv_ii][~mask] # this is a 1-d np.array()
if len(im_fl) == 0:
fl[wv_ii] = 0
er[wv_ii] = 99
elif mode == 'wwm':
im_weights = smoothed_white[~mask]
n_weights = len(im_weights)
im_weights = np.where(np.isnan(im_weights), 0, im_weights)
if np.sum(im_weights) == 0:
im_weights[:] = 1. / n_weights
warn = True
im_weights = im_weights / np.sum(im_weights)
fl[wv_ii] = np.sum(im_fl * im_weights)
er[wv_ii] = np.sqrt(np.sum(im_var * (im_weights ** 2)))
elif mode == 'ivar':
im_var_white = var_white[~mask]
im_weights = 1. / im_var_white
n_weights = len(im_weights)
im_weights = np.where(np.isnan(im_weights), 0, im_weights)
if | np.sum(im_weights) | numpy.sum |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from __future__ import division, print_function
"""diffacto.diffacto: provides entry point main()."""
__version__ = "1.0.5"
import csv
import re
import warnings
from collections import defaultdict
from multiprocessing import Pool
from scipy import optimize, stats
import networkx as nx
import numpy as np
import pandas
from numpy import array, isfinite, nanmean, nansum
from pyteomics import fasta
# from numba import jit # # Enable just-in-time compiler for speeding up
# @jit
def fast_farms(
probes: np.array,
weight: float = 0.5,
mu: float = 0,
max_iter: int = 1000,
force_iter: bool = False,
min_noise: float = 1e-4,
fill_nan: float = 0.0,
):
"""Bayesian Factor Analysis for Proteomics Summarization
A python translation of function "generateExprVal.method.farms" from
Bioconductor FARMS.
[http://www.bioconductor.org/packages/release/bioc/html/farms.html]
[http://www.bioinf.jku.at/publications/papers/farms/supplementary.ps]
Reference:
<NAME>, <NAME> and <NAME> (2006). A new summarization
method for affymetrix probe level data. Bioinformatics, 22(8),
http://bioinformatics.oxfordjournals.org/cgi/content/abstract/22/8/943.
Inputs:
probes: Peptide abundance array (N peptides, M samples) in log scale.
weight: Hyperparameter (backscale factor) value in the range of [0,1]
which determines the influence of the prior.
mu: Hyperparameter value which allows to quantify different aspects
of potential prior knowledge. A value near zero assumes that
most genes do not contain a signal, and introduces a bias for
loading matrix elements near zero. """
readouts = np.array(probes)
if fill_nan != 0:
readouts[np.isnan(readouts)] = fill_nan
# normalize and transform X
X = np.nan_to_num(readouts).T
X = X - np.nanmean(X, axis=0)
xsd = np.nanstd(X, axis=0)
xsd[xsd < min_noise] = 1.0
X /= xsd
X[~isfinite(X)] = 0
n_samples, n_features = X.shape
C = np.cov(X.T, ddof=0)
# positive definite
C = 0.5 * (C + C.T)
C[np.where(C < 0)] = 0
# robustness
U, s, V = np.linalg.svd(C)
s[s < min_noise] = min_noise
C = U.dot(np.diag(s)).dot(V)
# initiation
λ = np.sqrt(np.diag(C) * 0.75)
ψ = np.diag(C) - λ ** 2
old_psi = ψ
old_lambda = λ
alpha = weight * n_features
E = 1.0
min_noise_square = min_noise ** 2
C_diag = np.diag(C)
for i in range(max_iter):
# E step
φ = λ / ψ
a = 1 + np.matmul(λ.reshape(1, -1), φ.reshape(-1, 1))
η = φ / a
ζ = C.dot(η.T)
E = 1 - η.dot(λ) + η.dot(ζ)
# M step
λ = ζ.T / (E + ψ * alpha)
λ = np.asarray(λ)[0]
ψ = C_diag - np.asarray(ζ)[0] * λ + ψ * alpha * λ * (mu - λ)
ψ = np.maximum(ψ, min_noise_square)
if (
ψ[-1] == old_psi[-1]
and ψ[0] == old_psi[0]
and np.array_equal(ψ, old_psi)
and np.array_equal(λ, old_lambda)
):
break
if not force_iter:
if abs(ψ - old_psi).max() / old_psi.max() < min_noise / 10:
break
old_psi = ψ
old_lambda = λ
loading = np.sqrt(E[0, 0]) * λ
φ = loading / ψ
weights = loading / loading.max() # rescale loadings to the range of [0,1]
noise = 1 / (1 + np.matmul(loading.reshape(1, -1), φ.reshape(-1, 1)))
noise = noise[0, 0]
return weights, noise
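# Minimal sketch of how fast_farms can be called (synthetic data; not part of Diffacto itself).
# Three peptides track a common signal across six samples, in log scale; the returned `weights`
# rate how consistent each peptide is, and `noise` is the fraction of unexplained variance.
def _example_fast_farms():
    rng = np.random.RandomState(0)
    signal = np.linspace(10, 12, 6)                     # common abundance profile (log scale)
    probes = signal + rng.normal(0, 0.05, size=(3, 6))  # 3 peptides x 6 samples with small noise
    probes[2] += rng.normal(0, 1.0, size=6)             # make the third peptide noisier
    weights, noise = fast_farms(probes, weight=0.5, mu=0.1)
    print("peptide weights:", weights)
    print("noise fraction:", noise)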
# @jit(nogil=True)
def fast_gmean_nomissing(weights, pep_abd, group_ix):
"""
Calculate geometric means based on non-missing peptide readouts.
"""
abd_w = pep_abd * weights[..., None]
one_w = abd_w / abd_w * weights[..., None]
a_sums = np.nansum(abd_w, axis=0)
w_sums = np.nansum(one_w, axis=0)
expr = a_sums[group_ix].sum(axis=1) / w_sums[group_ix].sum(axis=1)
return expr
# @jit(nogil=True)
def sum_squares(pep_abd, group_ix, estimates):
"""
Calculate sum of squared residuals
"""
global nGroups
residual = 0.0
for i in range(nGroups):
res = pep_abd[:, group_ix[i]] - estimates[i]
residual += np.nansum(res * res)
return residual
# @jit(nogil=True)
def f_ANOVA(pep_abd, group_ix, estimates, null_ave, dof_loss=0):
"""
Perform ANOVA
Inputs:
pep_abd: Peptide abundance matrix
group_ix: Index of sample groups
estimates: Estimated abundances of sample groups
null_ave: Global average
dof_loss: Loss of dof due to averaging
Return:
f: Value of F-statistic
dof1: Degree of freedom of model 1
dof2: Degree of freedom of model 2
"""
global nGroups
ss_total = sum_squares(pep_abd, group_ix, null_ave)
ss_resid = sum_squares(pep_abd, group_ix, estimates)
dof1 = nGroups - 1
dof2 = isfinite(pep_abd).sum() - nGroups - dof_loss
if dof2 <= 0:
return np.nan, dof1, dof2
f = ((ss_total - ss_resid) / dof1) / (ss_resid / dof2)
return f, dof1, dof2
def mv_impute(pep_abd, group_ix, least_missing=0.99, impute_as=0.001):
""" Impute missing values when having a large proportion in a sample group.
Inputs:
pep_abd: n peptides, m samples, in linear scale
group_ix: grouping index for each of the m samples
least_missing: set the minimum threshold of missing rate to trigger the imputation (Default: 99%).
impute_as: set missing values in the sample to this value
Return:
numpy array after replacing missing values with imputed values
"""
aT = np.array(pep_abd).T
for ix in group_ix:
if np.isnan(aT[ix]).sum() > least_missing * len(aT[ix].flatten()):
val = aT[ix]
val[np.where(np.isnan(val))] = impute_as
aT[ix] = val
return aT.T
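# Minimal sketch of mv_impute on synthetic data (not part of Diffacto itself): two sample groups
# of two samples each; the second group is entirely missing for both peptides, so with
# least_missing=0.9 its NaNs are replaced by the small placeholder value impute_as.
def _example_mv_impute():
    pep_abd = np.array([[5.0, 6.0, np.nan, np.nan],
                        [7.0, 8.0, np.nan, np.nan]])  # 2 peptides x 4 samples, linear scale
    group_ix = [np.array([0, 1]), np.array([2, 3])]   # sample indexes of the two groups
    imputed = mv_impute(pep_abd, group_ix, least_missing=0.9, impute_as=0.001)
    print(imputed)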
# @jit(nogil=True)
def weighted_average(weights, pep_abd, group_ix):
"""
Calculate weighted geometric means for sample groups
Inputs:
weights: Weights of peptides after filtering by loading threshold
pep_abd: Peptide abundances after filtering by loading threshold
group_ix: Array indexes of sample groups
Return:
expr: Estimated expression levels
"""
global nGroups
abd_w = pep_abd * weights[..., None]
count_peptides = np.sum(~np.isnan(pep_abd), axis = 0)
one_w = abd_w / abd_w * weights[..., None]
a_sums = np.nansum(abd_w, axis=0)
    w_sums = np.nansum(one_w, axis=0)