import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from pytorch2keras.converter import pytorch_to_keras
class LayerTest(nn.Module):
def __init__(self, kernel_size=3, padding=1, stride=1):
super(LayerTest, self).__init__()
self.pool = nn.AvgPool2d(kernel_size=kernel_size, padding=padding, stride=stride)
def forward(self, x):
x = self.pool(x)
return x
def check_error(output, k_model, input_np, epsilon=1e-5):
pytorch_output = output.data.numpy()
keras_output = k_model.predict(input_np)
error = np.max(np.abs(pytorch_output - keras_output))
print('Error:', error)
assert error < epsilon
return error
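# A minimal sketch of the conversion/check step the truncated __main__ loop
# presumably performs (input shape assumed to match input_np below):
#   input_var = Variable(torch.FloatTensor(input_np))
#   output = model(input_var)
#   k_model = pytorch_to_keras(model, input_var, [(3, 224, 224)], verbose=False)
#   check_error(output, k_model, input_np)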
if __name__ == '__main__':
max_error = 0
for kernel_size in [1, 3, 5, 7]:
for padding in [0, 1, 3]:
for stride in [1, 2, 3, 4]:
# RuntimeError: invalid argument 2: pad should be smaller than half of kernel size, but got padW = 1, padH = 1, kW = 1,
if padding > kernel_size / 2:
continue
model = LayerTest(kernel_size=kernel_size, padding=padding, stride=stride)
model.eval()
input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
import numpy as np
import pdb
from tqdm import tqdm
import torch as t
import torch.nn.functional as F
import torch.nn as nn
from scipy.stats import chi2, truncnorm
import sys
import pickle as pkl
import cdm_pytorch as cp
import matplotlib.pyplot as plt
def gen_choices_from_rank(order):
order = order[::-1]
vec = np.zeros([len(order),1])
choice_sets = np.zeros([len(order)-1, len(order)])
choices = np.zeros([len(order)-1, len(order)])
vec[order[0]] = 1
for i in range(1,len(order)):
vec[order[i]] = 1
choice_sets[i-1] = vec[:,0]
choices[i-1, order[i]] = 1
return choice_sets, choices
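# Illustrative example (assumed ranking 2 > 0 > 1 over 3 items): the reversed
# order is [1, 0, 2], so the function emits two choice observations --
# choice_sets[0] = {0, 1} with item 0 chosen, and choice_sets[1] = {0, 1, 2}
# with item 2 chosen, i.e. each row records the top item of a suffix of the ranking.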
def gen_CDM_ranking(U):
order = []
vec = np.ones(U.shape[0], dtype=bool)
for idx in range(U.shape[0]-1):
p = np.exp(U[:,vec][vec,:].sum(-1))
p /= p.sum()
order.append(vec.nonzero()[0][np.random.choice(len(p),p=p)])
vec[order[-1]] = 0
order.append(vec.nonzero()[0][0])
return np.array(order)
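# gen_CDM_ranking draws a full ranking sequentially: at each step the softmax
# is taken over each remaining item's summed context-dependent utility against
# the other remaining items (U restricted to the active mask `vec`), the
# winner is removed, and the last remaining item is appended directly.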
def gen_O_J_v2(m,n,U):
# EDITED FOR CDM
# Given a fixed CDM U, it will generate lots of choices.
O, J = [], []
for i in tqdm(range(m)):
order = gen_CDM_ranking(U)
o, j = gen_choices_from_rank(order)
O.append(o)
J.append(j)
O = np.concatenate(O,0)
J = np.concatenate(J,0)
return O, J
def set_to_edge(o):
n = len(o)
o_nnz = o.nonzero()[0]
L_list = np.zeros([n*(n-1), len(o_nnz)])
for idx_nz, idx in enumerate(o_nnz):
L_list[idx*(n-1):(idx+1)*(n-1),idx_nz] = np.delete(o, idx)
L_sum = L_list.sum(-1,keepdims=True)
#pdb.set_trace()
return L_list.dot(L_list.T) - (1/(len(o_nnz)))*L_sum.dot(L_sum.T)
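# Hedged reading of set_to_edge: for each item in the choice set it builds an
# (n*(n-1) x k) incidence-style column marking the other set members, then
# returns the Gram matrix L L^T centered by the outer product of the row sums
# (scaled by 1/k), i.e. a covariance-like matrix over the set's "edges".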
### Methods for L2 Error Plot
def vectorize(U):
n,_=U.shape
u=np.array([U[i,j] for i in range(n) for j in range(n) if i!=j])
return (u-u.mean())
def gen_datasets(n=6, num_rankings=1000, num_datasets=20, B=1.5, theta=None, d=None, random_state=8080):
if theta is not None:
theta = np.array(theta)
theta -= theta.mean()
U = -np.ones([n,1]).dot(theta); np.fill_diagonal(U,0);
elif d is not None:
T = truncnorm.rvs(-B, B, loc=0, scale=1, size=(n,d), random_state=random_state)
C = truncnorm.rvs(-B, B, loc=0, scale=1, size=(n,d), random_state=random_state)
T /= np.sqrt(d*(n-1))
C /= np.sqrt(d*(n-1))
U = T.dot(C.T)
np.fill_diagonal(U,0); U -= U.sum()/(n*(n-1)); np.fill_diagonal(U,0)
else:
U = truncnorm.rvs(-B, B, loc=0, scale=1, size=(n,n), random_state=random_state)
np.fill_diagonal(U,0); U /= n-1; U -= U.sum()/(n*(n-1)); np.fill_diagonal(U,0)
print(f'U: {U}')
data = [gen_O_J_v2(num_rankings,n,U) for idx in range(num_datasets)]
u = vectorize(U)
return u, data
def fit_functions(data, num_fits, start_point, end_point, d=None, logspace=True):
param_store = []
gv_store = []
if logspace:
if num_fits == 1:
increment_sizes = [int(10**end_point)]
else:
increment_sizes = np.int32(np.logspace(start_point, end_point, num_fits))
# coding:utf-8
import numpy as np
import torch
import math
import cv2
from pycocotools import mask as cocomask
class IOUMetric(object):
"""
Class to calculate mean-iou using fast_hist method
"""
def __init__(self, num_classes):
self.num_classes = num_classes
self.hist = np.zeros((num_classes, num_classes))
def _fast_hist(self, label_pred, label_true):
mask = (label_true >= 0) & (label_true < self.num_classes)
hist = np.bincount(
self.num_classes * label_true[mask].astype(int) +
label_pred[mask], minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
return hist
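# The bincount trick in _fast_hist: each (true, pred) pair is encoded as
# true * num_classes + pred, so one 1-D bincount of length num_classes**2
# reshapes directly into the full confusion matrix. E.g. with 2 classes,
# label_true = [0, 1, 1] and label_pred = [0, 1, 0] encode to [0, 3, 2],
# giving hist = [[1, 0], [1, 1]].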
def add_batch(self, predictions, gts):
for lp, lt in zip(predictions, gts):
self.hist += self._fast_hist(lp.flatten(), lt.flatten())
def evaluate(self):
acc = np.diag(self.hist).sum() / self.hist.sum()
acc_cls = np.diag(self.hist)
#!/usr/bin/python
# -*- coding: utf-8 -*-
from matplotlib import animation
import matplotlib.pyplot as plt
import time
import numpy as np
import gc
import datetime
from . import AC_tools as AC
"""
Animate a NetCDF array to give a video of 2D (surface)
"""
# --- Settings for calling main as via scripting
specs = ['O3', 'NO2', 'PAN', 'ALD2']
pcent = True
ClearFlo_unit = True
# verbose and debug settings for script main call
debug = True
def main(spec='NO', pcent=False, fixcb=None, limit_by_dates=False,
extend='neither', ClearFlo_unit=False, verbose=True, debug=False):
"""
Extract data array from given location and make Animation
"""
# Get data in array ( time, lat, lon ) and dates (datetime.datetime )
arr, dates = get_data_dates(spec=spec, limit_by_dates=limit_by_dates,
debug=debug)
if debug:
print([(i[:5], i.shape) for i in (arr, dates)])
# Get titles and other variables for the run (e.g. res, lons and lats for GC config.)
lat, lon, units, fname, res, title, scale = get_run_info(spec=spec,
pcent=pcent, ClearFlo_unit=ClearFlo_unit)
arr = arr*scale
# Setup figure and axis
fig, ax = setup_figure_and_axis()
# Set manual limit colormap ( colorbar )?
# fixcb = np.array([ 0, 100])
# extend ='both'
# extend ='max'
# Setup first frame for re-use ( inc. basemap ) and get key variables
cmap, specplt, lvls, cnorm, m, fixcb, fixcb_buffered = setup_plot2animate(
arr, fig=fig, ax=ax, lat=lat, lon=lon,
units=units, res=res, fixcb=fixcb, debug=debug)
# setup figure ascetics
setup_figure_ascetics(dates, cmap=cmap, cnorm=cnorm,
units=units, fig=fig, title=title, extend=extend,
arr=arr, fixcb=fixcb, fixcb_buffered=fixcb_buffered, debug=debug)
# animate the array and save as
animate_array(arr, dates, specplt, lvls=lvls, cnorm=cnorm,
cmap=cmap, debug=debug, fig=fig, m=m, lon=lon, lat=lat,
spec=spec, fname=fname)
def extract_data_dates(spec='O3', file=None, dates_variable='time',
fill_invalid_with_mean=True, limit_by_dates=False,
sdate=datetime.datetime(2005, 1, 1), ver='1.7',
edate=datetime.datetime(2005, 1, 7), debug=False):
""" Extracts dates and data from a given location """
from pandas import DataFrame
import numpy as np
from netCDF4 import Dataset
import datetime
# # <= Kludge: convert to tracer name used in NetCDF for extraction
from AC.funcs_vars import what_species_am_i
pspec = what_species_am_i(input=spec, ver=ver, invert=True, debug=debug)
with Dataset(file, 'r') as rootgrp:
if debug:
print([i for i in rootgrp.variables])
# Return data as an array
# arr = np.ma.array( rootgrp.variables[ pspec ] )
arr = np.array(rootgrp.variables[pspec])
print(rootgrp.variables[pspec])
print(np.array(rootgrp.variables[pspec]))
# get dates
dates = np.ma.array(rootgrp.variables[dates_variable])
"""Solvers for multitask group-stl with a simplex constraint."""
import numpy as np
from sklearn.linear_model import Lasso
def solver_stl(X, Y, alpha=None, callback=None, positive=False,
maxiter=3000, tol=1e-4):
"""Perform CD to solve positive Lasso."""
n_tasks, n_samples, n_features = X.shape
theta = np.zeros((n_features, n_tasks))
if callback:
callback(theta)
if alpha is None:
alpha = np.ones(n_tasks)
import pytest
import sys
import numpy as np
import os
#import os.path as osp
import yaml
import matplotlib.pyplot as plt
from scipy.interpolate import PchipInterpolator
import wisdem.inputs as sch # used for loading turbine YAML and using WISDEM validation process
from wisdem.commonse.utilities import arc_length
# test local code; consider src layout in future to test installed code
import raft as raft
import moorpy as mp
import importlib
mp = importlib.reload(mp)
raft = importlib.reload(raft)
raft_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def runRAFT(fname_design, fname_turbine, fname_env):
'''
This is the main function for running the RAFT model in standalone form, where inputs are contained in the specified input files.
'''
# open the design YAML file and parse it into a dictionary for passing to raft
with open(fname_design) as file:
design = yaml.load(file, Loader=yaml.FullLoader)
print("Loading file: "+fname_design)
print(f"'{design['name']}'")
depth = float(design['mooring']['water_depth'])
# now turn off potMod in the design dictionary to avoid BEM analysis
for mi in design['platform']['members']: mi['potMod'] = False
# set up frequency range
w = np.arange(0.05, 5, 0.05) # frequency range (to be set by modeling options yaml)
# read in turbine data and combine it in
# turbine = loadTurbineYAML(fname_turbine)
# design['turbine'].update(turbine)
# --- Create and run the model ---
model = raft.Model(design, w=w, depth=depth) # set up model
model.setEnv(Hs=8, Tp=12, V=10, Fthrust=float(design['turbine']['Fthrust'])) # set basic wave and wind info
model.calcSystemProps() # get all the setup calculations done within the model
model.solveEigen()
model.calcMooringAndOffsets() # calculate the offsets for the given loading
model.solveDynamics() # put everything together and iteratively solve the dynamic response
model.plot()
plt.show()
return model
def loadTurbineYAML(fname_turbine):
'''
This loads data from a standard turbine YAML file to fill in the turbine portion of the RAFT input dictionary.
'''
# Set discretization parameters
n_span = 30 # [-] - number of blade stations along span
grid = np.linspace(0., 1., n_span) # equally spaced grid along blade span, root=0 tip=1
n_aoa = 200 # [-] - number of angles of attack to discretize airfoil polars
# dictionary to be filled in with turbine data
d = dict(blade={}, airfoils={}, env={})
# Load wind turbine geometry yaml
print("Loading turbine YAML file: "+fname_turbine)
run_dir = os.path.dirname( os.path.realpath(__file__) ) + os.sep
fname_input_wt = os.path.join(run_dir, fname_turbine)
wt_init = sch.load_geometry_yaml(fname_input_wt)
print(f"'{wt_init['name']}'")
# Conversion of the yaml inputs into CCBlade inputs
Rhub = 0.5 * wt_init["components"]["hub"]["diameter"] # [m] - hub radius
d['precone'] = np.rad2deg(wt_init["components"]["hub"]["cone_angle"])
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy as sp
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import Perceptron as PerceptronClf
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
RANDOM_STATE = 0
# Error calculator for class average for each fold
def error_calc(test_labels, predictions):
error = []
eps = 1e-15
# Calculate the custom metric 1- 0.5(Specificity + Sensitivity)
for i in np.unique(test_labels):
# true positives
tp = ((test_labels == i) & (predictions == i)).sum()
# true negatives
tn = ((test_labels != i) & (predictions != i)).sum()
# false positives
fp = ((test_labels != i) & (predictions == i)).sum()
# false negatives
fn = ((test_labels == i) & (predictions != i)).sum()
tp_new = sp.maximum(eps, tp)
pos_new = sp.maximum(eps, tp+fn)
tn_new = sp.maximum(eps, tn)
neg_new = sp.maximum(eps, tn+fp)
error.append(1 - 0.5*(tp_new/pos_new) - 0.5*(tn_new/neg_new))
# convert the error list into numpy array
error_np = np.array(error)
return np.mean(error_np)
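# Minimal usage sketch (toy labels assumed): for each class the metric is
# 1 - 0.5*(sensitivity + specificity), then classes are averaged.
#   y_true = np.array([0, 0, 1, 1]); y_pred = np.array([0, 1, 1, 1])
#   error_calc(y_true, y_pred)  # -> 0.25 (each class misses half of one rate)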
import unittest
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.uniform import Uniform
from .spatial_hist import SpatialHist
class TestSpatialHist(unittest.TestCase):
def setUp(self):
# Parameters
nbin_per_side = 50
prior_count = 0.1
xlm = [-30, 20]
ylm = [-15, 15]
n = 1000 # number of training points in each slice
# Shape of distribution
mu1 = torch.tensor([-22., 0.])
mu2 = torch.tensor([0., 8.])
Sigma = torch.eye(2)
# Sample the data
data1 = MultivariateNormal(mu1, Sigma).sample(torch.Size([n]))
data2 = MultivariateNormal(mu2, Sigma).sample(torch.Size([n]))
data = torch.cat([data1, data2])
# Build the SpatialHist instance & sample data
self.H = SpatialHist(data, xlm, ylm, nbin_per_side, prior_count)
self.syndata, _, _ = self.H.sample(n)
self.data = data
self.xlim = xlm
self.ylim = ylm
def test_sample(self):
"""
Plot the sampled data next to the original data to verify that it
looks correct.
"""
# Plot original data
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8,8))
axes[0].scatter(self.data[:,0], self.data[:,1], s=3, c='r')
axes[0].set_title('original data')
axes[0].set_xlim(self.xlim)
axes[0].set_ylim(self.ylim)
# plot reconstructed data
axes[1].scatter(self.syndata[:,0], self.syndata[:,1], s=3, c='b')
axes[1].set_title('reconstructed data')
axes[1].set_xlim(self.xlim)
axes[1].set_ylim(self.ylim)
plt.show()
# TODO - write assertions
def test_plot(self):
"""
Test visualization of learned position model
"""
self.H.plot()
# TODO - write assertions
def test_dualMethodLL(self):
"""
Check two different ways of computing likelihood
"""
ll = self.H.score(self.syndata)
_, ll2 = self.H.get_id(self.syndata)
ll2 = torch.sum(ll2)
self.assertTrue(torch.abs(ll - ll2) <= 1e-2)
def test_validDensity(self):
"""
Numerically check the normalizing constant of the density
"""
nsamp = 10000
area = (self.xlim[1]-self.xlim[0]) * (self.ylim[1]-self.ylim[0])
x = Uniform(low=self.xlim[0], high=self.xlim[1]).sample(torch.Size([nsamp]))
y = Uniform(low=self.ylim[0], high=self.ylim[1]).sample(torch.Size([nsamp]))
D = torch.cat([x.view(-1, 1), y.view(-1, 1)], 1)
_, ll = self.H.get_id(D)
ltot = torch.logsumexp(ll.view(-1), 0)
tsum = torch.exp(ltot)
tot = (area/nsamp) * tsum
tot = tot.item()
print('Average score: %0.3f' % tot)
self.assertTrue(np.abs(1 - tot) < 0.1)  # tolerance assumed for this Monte Carlo check
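# The check above is plain Monte Carlo integration: with nsamp uniform draws
# over the box, (area / nsamp) * sum(exp(loglik)) estimates the integral of
# the density, which should be close to 1 for a properly normalized model.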
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest, os
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
@skip_check_grad_ci(reason="DNNL's MatMul doesn't implement grad kernel.")
class TestDnnlMatMulOp(OpTest):
def generate_data(self):
self.x = np.random.random((25, 2, 2)).astype("float32")
self.y = np.random.random((25, 2, 2)).astype("float32")
self.alpha = 1.0
self.out = self.alpha * np.matmul(self.x, self.y)
def set_attributes(self):
self.alpha = self.alpha if hasattr(self, 'alpha') else 1.0
self.attrs = {'alpha': self.alpha}
def setUp(self):
# Set max isa, otherwise fails on SKX and earlier
os.environ["DNNL_MAX_CPU_ISA"] = "AVX"
self.op_type = "matmul"
self._cpu_only = True
self.use_mkldnn = True
self.generate_data()
self.set_attributes()
self.attrs['use_mkldnn'] = True
self.inputs = {'X': self.x, 'Y': self.y}
self.outputs = {'Out': self.out}
def test_check_output(self):
self.check_output()
class TestDnnlMatMulOpAlpha(TestDnnlMatMulOp):
def generate_data(self):
self.x = np.random.random((17, 2, 3)).astype("float32")
self.y = np.random.random((17, 3, 2)).astype("float32")
self.alpha = 2.0
self.out = self.alpha * np.matmul(self.x, self.y)
class TestDnnlMatMulOp2D(TestDnnlMatMulOp):
def print_tensor(self, name, tensor):
print(name)
print(tensor)
def generate_data(self):
self.x = np.random.random((12, 9)).astype("float32")
self.y = np.random.random((9, 12)).astype("float32")
self.out = np.matmul(self.x, self.y)
class TestDnnlMatMulOpTransposeX(TestDnnlMatMulOp):
def generate_data(self):
self.x = np.random.random((12, 9)).astype("float32")
self.y = np.random.random((12, 9)).astype("float32")
self.out = np.matmul(np.transpose(self.x), self.y)
def set_attributes(self):
self.attrs = {'transpose_X': True}
class TestDnnlMatMulOpTransposeY(TestDnnlMatMulOp):
def generate_data(self):
self.x = np.random.random((12, 9)).astype("float32")
self.y = np.random.random((12, 9)).astype("float32")
self.out = np.matmul(self.x, np.transpose(self.y))
def set_attributes(self):
self.attrs = {'transpose_Y': True}
class TestDnnlMatMulOpTransposeY3D(TestDnnlMatMulOp):
def generate_data(self):
self.x = np.random.random((17, 3, 2)).astype("float32")
self.y = np.random.random((17, 3, 2)).astype("float32")
self.out = np.matmul(self.x, np.transpose(self.y, (0, 2, 1)))
def set_attributes(self):
self.attrs = {'transpose_Y': True}
class TestDnnlMatMulOpInt8NoScales(TestDnnlMatMulOp):
def generate_data(self):
self.x = np.random.random((12, 9))
'''
this is the EMU^r (recursive computation of expected marginal utility) algorithm of Bhattacharjee et al.
REFERENCES:
<NAME>., <NAME>., <NAME>., <NAME>.: Bridging the gap: Many-objective optimization and informed decision-making. IEEE Trans. Evolutionary
Computation 21(5), 813-820 (2017)
'''
import numpy as np
import math as m
import copy
from sklearn.cluster import AffinityPropagation
class solution(object):
def __init__(self):
self.index = None
self.objective = None
self.original_objective = None
self.type = None
self.marginalU = 0
self.front = 0
self.pick = 0
self.re_vector = None
class reference_point(object):
def __init__(self):
self.direction = None
self.neighbor = []
self.associate = []
self.identify = None
def compute_emu(p, w):
for i in range(len(p)):
p[i].index = i
obj_mat = np.asarray([i.objective for i in p]).T
w_mat = np.asarray([i.direction for i in w])
u_mat = np.dot(w_mat, obj_mat)
for i in range(len(u_mat)):
temp_index = np.argsort(u_mat[i, :])
for j in p:
if j.index == temp_index[0]:
k = 1
eta = u_mat[i, temp_index[k]] - u_mat[i, temp_index[0]]
while eta == 0.0:
k = k + 1
if k >= len(temp_index):
eta = 0
break
eta = u_mat[i, temp_index[k]] - u_mat[i, temp_index[0]]
j.marginalU += eta
else:
j.marginalU += 0
return p
def Compute(p, w):
front_current = 0
current_array = p
while len(current_array) > 1:
current_array = compute_emu(current_array, w)
front_current += 1
next_array = []
for i in current_array:
if i.marginalU != 0:
i.front = front_current
else:
next_array.append(i)
current_array = next_array
if len(current_array) == 1:
front_current += 1
current_array[0].front = front_current
else:
pass
for i in range(front_current):
front_now = front_current - i
if front_now != 1:
temp_front_array = [j.marginalU for j in p if j.front == front_now]
temp_max_emu = max(temp_front_array)
for k in p:
if k.front == front_now-1:
k.marginalU += temp_max_emu
else:
pass
else:
pass
return p
def Associate(p, w):
obj_mat = np.asarray([i.objective for i in p]).T
w_mat = np.asarray([i.direction for i in w])
d_mat = np.dot(w_mat, obj_mat)
for i in range(len(w_mat)):
d_mat[i, :] = d_mat[i, :] / np.sqrt(sum(w_mat[i, :]**2))
for i in range(len(obj_mat[0, :])):
length2 = sum(obj_mat[:, i]**2)
for j in range(len(d_mat[:, i])):
d_2 = length2-d_mat[j, i]**2
if d_2 < 0:
d_mat[j, i] = 0
else:
d_mat[j, i] = d_2
w[np.argmin(d_mat[:, i])].associate.append(p[i])
p[i].repoints = w[np.argmin(d_mat[:, i])]
return p, w
def Identify(w):
for i in w:
if len(i.associate) >= 1:
temp_max = -1
for k in i.associate:
if k.marginalU > temp_max:
temp_max = k.marginalU
i.identify = k
k.re_vector = i
else:
pass
else:
pass
return w
def Select(w):
select_set = []
for i in w:
if i.identify:
mark = 1
for j in i.neighbor:
if j.identify:
if i.identify.marginalU >= j.identify.marginalU:
pass
else:
mark = 0
else:
pass
if mark == 1:
select_set.append(i.identify)
else:
pass
else:
pass
return select_set
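# A hedged sketch of how these pieces compose into one EMU^r selection pass
# (main_function below constructs the solutions `p` and reference points `w`):
#   p = Compute(p, w)            # recursive EMU front assignment
#   p, w = Associate(p, w)       # attach each solution to its nearest direction
#   w = Identify(w)              # per-direction representative (max marginal utility)
#   picked = Select(w)           # keep representatives that dominate their neighbors
#   p, w = Initializaiton(p, w)  # reset per-pass state before the next round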
def Initializaiton(p, w):
for i in p:
i.type = None
i.index = None
i.marginalU = 0
i.front = 0
i.pick = 0
i.re_vector = None
for i in w:
i.associate = []
i.identify = None
return p, w
def main_function(data, K):
points = copy.copy(data)
dim = len(points[0])
popsize = len(points)
for i in range(dim):
temp1 = max(points[:, i])
temp2 = min(points[:, i])
points[:, i] = (points[:, i] - temp2) / (temp1 - temp2)
solutions = [solution() for i in range(popsize)]
# reference_points
div = 0
H = 0
factor = 400 / 1600 * popsize
while H <= factor:
div += 1
H = m.factorial(div + dim - 1) / (m.factorial(div) * m.factorial(dim - 1))
div -= 1
list_range = [i / div for i in range(div + 1)]
direction = []
def w_generator(now_dim, now_sum, now_array):
if now_dim == 1:
for i in list_range:
temp_array = copy.copy(now_array)
if round(i + now_sum - 1, 5) == 0:
temp_array.append(i)
direction.append(temp_array)
else:
for i in list_range:
temp_array = copy.copy(now_array)
if round(i + now_sum - 1, 5) <= 0:
temp_array.append(i)
w_generator(now_dim - 1, now_sum + i, temp_array)
w_generator(dim, 0, [])
direction = np.asarray(direction)
Repoints = [reference_point() for i in range(len(direction))]
for i in range(len(direction)):
Repoints[i].direction = direction[i, :]
distance_list = np.sum((direction - direction[i, :] * np.ones(direction.shape)) ** 2, axis = 1)
distance_sort = np.argsort(distance_list)
"""
gui/average
~~~~~~~~~~~~~~~~~~~~
Graphical user interface for three-dimensional averaging of particles
:author: <NAME>, 2017
:copyright: Copyright (c) 2017 Jungmann Lab, Max Planck Institute of Biochemistry
"""
import os.path
import sys
import traceback
import colorsys
import matplotlib.pyplot as plt
import numba
import numpy as np
import scipy
import scipy.sparse
from scipy import signal
from PyQt4 import QtCore, QtGui
from .. import io, lib, render
from numpy.lib.recfunctions import stack_arrays
from cmath import rect, phase
from tqdm import tqdm
import scipy.ndimage.filters
DEFAULT_OVERSAMPLING = 1.0
INITIAL_REL_MAXIMUM = 2.0
ZOOM = 10 / 7
N_GROUP_COLORS = 8
@numba.jit(nopython=True, nogil=True)
def render_hist(x, y, oversampling, t_min, t_max):
n_pixel = int(np.ceil(oversampling * (t_max - t_min)))
in_view = (x > t_min) & (y > t_min) & (x < t_max) & (y < t_max)
x = x[in_view]
y = y[in_view]
x = oversampling * (x - t_min)
y = oversampling * (y - t_min)
image = np.zeros((n_pixel, n_pixel), dtype=np.float32)
render._fill(image, x, y)
return len(x), image
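# render_hist bins localizations into an oversampled 2-D histogram: points are
# clipped to the (t_min, t_max) window, rescaled by `oversampling` (rendered
# pixels per camera pixel), and accumulated into an n_pixel x n_pixel float
# image by render._fill; the in-view count is returned alongside the image.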
@numba.jit(nopython=True, nogil=True)
def render_histxyz(a, b, oversampling, a_min, a_max, b_min, b_max):
n_pixel_a = int(np.ceil(oversampling * (a_max - a_min)))
n_pixel_b = int(np.ceil(oversampling * (b_max - b_min)))
in_view = (a > a_min) & (b > b_min) & (a < a_max) & (b < b_max)
a = a[in_view]
b = b[in_view]
a = oversampling * (a - a_min)
b = oversampling * (b - b_min)
image = np.zeros((n_pixel_b, n_pixel_a), dtype=np.float32)
render._fill(image, a, b)
return len(a), image
def rotate_axis(axis,vx,vy,vz,angle,pixelsize):
if axis == 'z':
vx_rot = np.cos(angle) * vx - np.sin(angle) * vy
vy_rot = np.sin(angle) * vx + np.cos(angle) * vy
vz_rot = vz
elif axis == 'y':
vx_rot = np.cos(angle) * vx + np.sin(angle) * np.divide(vz, pixelsize)
vy_rot = vy
vz_rot = -np.sin(angle) * vx * pixelsize + np.cos(angle) * vz
elif axis == 'x':
vx_rot = vx
vy_rot = np.cos(angle) * vy - np.sin(angle) * np.divide(vz, pixelsize)
vz_rot = np.sin(angle) * vy * pixelsize + np.cos(angle) * vz
return vx_rot, vy_rot, vz_rot
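# Note on units: x/y are in camera pixels while z is in nm, so the x- and
# y-axis rotations convert z to pixels (divide by pixelsize) before mixing
# coordinates and back to nm (multiply) afterwards. Quick sanity check
# (assumed values): rotating (1, 0, 0) by pi/2 about 'z' yields ~(0, 1, 0).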
def compute_xcorr(CF_image_avg, image):
F_image = np.fft.fft2(image)
xcorr = np.fft.fftshift(np.real(np.fft.ifft2((F_image * CF_image_avg))))
return xcorr
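# compute_xcorr applies the Fourier correlation theorem: multiplying the FFT
# of `image` by a pre-conjugated reference spectrum and inverse-transforming
# yields the cross-correlation, with fftshift moving zero lag to the center.
# Callers pass CF_image_avg = np.conj(np.fft.fft2(reference)) (see
# rotate_groups below).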
class ParametersDialog(QtGui.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle('Parameters')
self.setModal(False)
grid = QtGui.QGridLayout(self)
grid.addWidget(QtGui.QLabel('Oversampling:'), 0, 0)
self.oversampling = QtGui.QDoubleSpinBox()
self.oversampling.setRange(1, 200)
self.oversampling.setValue(DEFAULT_OVERSAMPLING)
self.oversampling.setDecimals(1)
self.oversampling.setKeyboardTracking(False)
self.oversampling.valueChanged.connect(self.window.updateLayout)
grid.addWidget(self.oversampling, 0, 1)
self.iterations = QtGui.QSpinBox()
self.iterations.setRange(1, 1)
self.iterations.setValue(1)
class View(QtGui.QLabel):
def __init__(self, window):
super().__init__()
self.window = window
self.setMinimumSize(1, 1)
self.setAlignment(QtCore.Qt.AlignCenter)
self.setAcceptDrops(True)
self._pixmap = None
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == '.hdf5':
self.open(path)
def resizeEvent(self, event):
if self._pixmap is not None:
self.set_pixmap(self._pixmap)
def set_image(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap('magma')(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype('uint8')
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order='C')
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
self._pixmap = QtGui.QPixmap.fromImage(qimage)
self.set_pixmap(self._pixmap)
def set_pixmap(self, pixmap):
self.setPixmap(pixmap.scaled(self.width(), self.height(), QtCore.Qt.KeepAspectRatio, QtCore.Qt.FastTransformation))
def update_image(self, *args):
oversampling = self.window.parameters_dialog.oversampling.value()
t_min = -self.r
t_max = self.r
N_avg, image_avg = render.render_hist(self.locs, oversampling, t_min, t_min, t_max, t_max)
self.set_image(image_avg)
class DatasetDialog(QtGui.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle('Datasets')
self.setModal(False)
self.layout = QtGui.QVBoxLayout()
self.checks = []
self.setLayout(self.layout)
def add_entry(self,path):
c = QtGui.QCheckBox(path)
self.layout.addWidget(c)
self.checks.append(c)
self.checks[-1].setChecked(True)
class Window(QtGui.QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('Picasso: Average3')
self.resize(1024, 512)
this_directory = os.path.dirname(os.path.realpath(__file__))
icon_path = os.path.join(this_directory, 'icons', 'average.ico')
icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon)
self.setAcceptDrops(True)
self.parameters_dialog = ParametersDialog(self)
self.dataset_dialog = DatasetDialog(self)
menu_bar = self.menuBar()
file_menu = menu_bar.addMenu('File')
open_action = file_menu.addAction('Open')
open_action.setShortcut(QtGui.QKeySequence.Open)
open_action.triggered.connect(self.open)
file_menu.addAction(open_action)
save_action = file_menu.addAction('Save')
save_action.setShortcut(QtGui.QKeySequence.Save)
save_action.triggered.connect(self.save)
file_menu.addAction(save_action)
process_menu = menu_bar.addMenu('Process')
parameters_action = process_menu.addAction('Parameters')
parameters_action.setShortcut('Ctrl+P')
parameters_action.triggered.connect(self.parameters_dialog.show)
dataset_action = process_menu.addAction('Datasets')
dataset_action.triggered.connect(self.dataset_dialog.show)
self.status_bar = self.statusBar()
self._pixmap = None
self.locs = []
self.z_state = []
self.group_index = []
self.infos = []
self.locs_paths = []
self._mode = 'Zoom'
self._pan = False
self._size_hint = (768, 768)
self.n_locs = 0
self._picks = []
self.index_blocks = []
self._drift = []
# Define the display view labels
self.viewxy = QtGui.QLabel('')
self.viewxz = QtGui.QLabel('')
self.viewyz = QtGui.QLabel('')
self.viewcp = QtGui.QLabel('')
minsize = 512
self.viewxy.setFixedWidth(minsize)
self.viewxy.setFixedHeight(minsize)
self.viewxz.setFixedWidth(minsize)
self.viewxz.setFixedHeight(minsize)
self.viewyz.setFixedWidth(minsize)
self.viewyz.setFixedHeight(minsize)
self.viewcp.setFixedWidth(minsize)
self.viewcp.setFixedHeight(minsize)
# Define layout
display_groupbox = QtGui.QGroupBox('Display')
displaygrid = QtGui.QGridLayout(display_groupbox)
displaygrid.addWidget(QtGui.QLabel('XY'), 0, 0)
displaygrid.addWidget(self.viewxy, 1, 0)
displaygrid.addWidget(QtGui.QLabel('XZ'), 0, 1)
displaygrid.addWidget(self.viewxz, 1, 1)
displaygrid.addWidget(QtGui.QLabel('YZ'), 2, 0)
displaygrid.addWidget(self.viewyz, 3, 0)
displaygrid.addWidget(QtGui.QLabel('CP'), 2, 1)
displaygrid.addWidget(self.viewcp, 3, 1)
button_groupbox = QtGui.QGroupBox('Buttons')
buttongrid = QtGui.QGridLayout(button_groupbox)
rotation_groupbox = QtGui.QGroupBox('Rotation + Translation')
rotationgrid = QtGui.QGridLayout(rotation_groupbox)
centerofmassbtn = QtGui.QPushButton("Center of Mass XYZ")
axis_groupbox = QtGui.QGroupBox('Axis')
axisgrid = QtGui.QGridLayout(axis_groupbox)
self.x_axisbtn = QtGui.QRadioButton("X")
self.y_axisbtn = QtGui.QRadioButton("Y")
self.z_axisbtn = QtGui.QRadioButton("Z")
self.z_axisbtn.setChecked(True)
axisgrid.addWidget(self.x_axisbtn, 0, 0)
axisgrid.addWidget(self.y_axisbtn, 0, 1)
axisgrid.addWidget(self.z_axisbtn, 0, 2)
proj_groupbox = QtGui.QGroupBox('Projection')
projgrid = QtGui.QGridLayout(proj_groupbox)
self.xy_projbtn = QtGui.QRadioButton("XY")
self.yz_projbtn = QtGui.QRadioButton("YZ")
self.xz_projbtn = QtGui.QRadioButton("XZ")
self.xy_projbtn.setChecked(True)
projgrid.addWidget(self.xy_projbtn, 0, 0)
projgrid.addWidget(self.yz_projbtn, 0, 1)
projgrid.addWidget(self.xz_projbtn, 0, 2)
rotatebtn = QtGui.QPushButton("Rotate")
self.radio_sym = QtGui.QRadioButton("x symmetry")
self.symEdit = QtGui.QSpinBox()
self.symEdit.setRange(2, 50)
self.symEdit.setValue(8)
self.radio_sym_custom = QtGui.QRadioButton("custom symmetry")
self.symcustomEdit = QtGui.QLineEdit("90,180,270")
deg_groupbox = QtGui.QGroupBox('Degrees')
deggrid = QtGui.QGridLayout(deg_groupbox)
self.full_degbtn = QtGui.QRadioButton("Full")
self.part_degbtn = QtGui.QRadioButton("Part")
self.degEdit = QtGui.QSpinBox()
self.degEdit.setRange(1, 10)
self.degEdit.setValue(5)
deggrid.addWidget(self.full_degbtn, 0, 0)
deggrid.addWidget(self.part_degbtn, 0, 1)
deggrid.addWidget(self.degEdit, 0, 2)
self.full_degbtn.setChecked(True)
# Rotation Groupbox
rotationgrid.addWidget(axis_groupbox, 0, 0, 1, 2)
rotationgrid.addWidget(proj_groupbox, 1, 0, 1, 2)
rotationgrid.addWidget(deg_groupbox, 2, 0, 1, 2)
rotationgrid.addWidget(rotatebtn, 3, 0, 1, 2)
rotationgrid.addWidget(self.symEdit, 4, 0)
rotationgrid.addWidget(self.radio_sym, 4, 1)
rotationgrid.addWidget(self.radio_sym_custom, 5, 0)
rotationgrid.addWidget(self.symcustomEdit, 5, 1)
buttongrid.addWidget(centerofmassbtn, 0, 0)
buttongrid.addWidget(rotation_groupbox, 1, 0)
centerofmassbtn.clicked.connect(self.centerofmass)
rotatebtn.clicked.connect(self.rotate_groups)
self.translatebtn = QtGui.QCheckBox("Translate only")
self.flipbtn = QtGui.QCheckBox("Consider flipped structures")
self.alignxbtn = QtGui.QPushButton("Align X")
self.alignybtn = QtGui.QPushButton("Align Y")
self.alignzzbtn = QtGui.QPushButton("Align Z_Z")
self.alignzybtn = QtGui.QPushButton("Align Z_Y")
self.translatexbtn = QtGui.QPushButton("Translate X")
self.translateybtn = QtGui.QPushButton("Translate Y")
self.translatezbtn = QtGui.QPushButton("Translate Z")
self.rotatexy_convbtn = QtGui.QPushButton('Rotate XY - Convolution')
self.scorebtn = QtGui.QPushButton('Calculate Score')
operate_groupbox = QtGui.QGroupBox('Operate')
operategrid = QtGui.QGridLayout(operate_groupbox)
rotationgrid.addWidget(self.translatebtn, 7, 0)
rotationgrid.addWidget(self.flipbtn, 8, 0)
self.x_range = QtGui.QLineEdit('-3,3')
rotationgrid.addWidget(QtGui.QLabel('x-Range (Px)'), 9, 0)
rotationgrid.addWidget(self.x_range, 9, 1)
self.y_range = QtGui.QLineEdit('-3,3')
rotationgrid.addWidget(QtGui.QLabel('y-Range (Px)'), 10, 0)
rotationgrid.addWidget(self.y_range, 10, 1)
self.z_range = QtGui.QLineEdit('-1000,1000')
rotationgrid.addWidget(QtGui.QLabel('z-Range (nm)'), 11, 0)
rotationgrid.addWidget(self.z_range, 11, 1)
self.z_range.textChanged.connect(self.adjust_z)
self.x_range.textChanged.connect(self.adjust_xy)
self.y_range.textChanged.connect(self.adjust_xy)
operategrid.addWidget(self.alignxbtn, 0, 1)
operategrid.addWidget(self.alignybtn, 1, 1)
operategrid.addWidget(self.alignzzbtn, 2, 1)
operategrid.addWidget(self.alignzybtn, 3, 1)
operategrid.addWidget(self.translatexbtn, 0, 0)
operategrid.addWidget(self.translateybtn, 1, 0)
operategrid.addWidget(self.translatezbtn, 2, 0)
operategrid.addWidget(self.rotatexy_convbtn,4,0)
operategrid.addWidget(self.scorebtn,4,1)
self.rotatexy_convbtn.clicked.connect(self.rotatexy_convolution)
self.alignxbtn.clicked.connect(self.align_x)
self.alignybtn.clicked.connect(self.align_y)
self.alignzzbtn.clicked.connect(self.align_zz)
self.alignzybtn.clicked.connect(self.align_zy)
self.translatexbtn.clicked.connect(self.translate_x)
self.translateybtn.clicked.connect(self.translate_y)
self.translatezbtn.clicked.connect(self.translate_z)
self.scorebtn.clicked.connect(self.calculate_score)
buttongrid.addWidget(operate_groupbox, 2, 0)
self.contrastEdit = QtGui.QDoubleSpinBox()
self.contrastEdit.setDecimals(1)
self.contrastEdit.setRange(0, 10)
self.contrastEdit.setValue(0.5)
self.contrastEdit.setSingleStep(0.1)
self.contrastEdit.valueChanged.connect(self.updateLayout)
self.grid = QtGui.QGridLayout()
self.grid.addWidget(display_groupbox, 0, 0, 2, 1)
self.grid.addWidget(button_groupbox, 0, 1, 1, 1)
contrast_groupbox = QtGui.QGroupBox('Contrast')
contrastgrid = QtGui.QGridLayout(contrast_groupbox)
contrastgrid.addWidget(self.contrastEdit)
buttongrid.addWidget(contrast_groupbox)
MODEL_X_DEFAULT = '0,20,40,60,0,20,40,60,0,20,40,60'
MODEL_Y_DEFAULT = '0,20,40,0,20,40,0,20,40,0,20,40'
MODEL_Z_DEFAULT = '0,0,0,0,0,0,0,0,0,0,0,0'
self.modelchk = QtGui.QCheckBox("Use Model")
self.model_x = QtGui.QLineEdit(MODEL_X_DEFAULT)
self.model_y = QtGui.QLineEdit(MODEL_Y_DEFAULT)
self.model_z = QtGui.QLineEdit(MODEL_Z_DEFAULT)
self.model_preview_btn = QtGui.QPushButton('Preview')
self.model_preview_btn.clicked.connect(self.model_preview)
self.modelblurEdit = QtGui.QDoubleSpinBox()
self.modelblurEdit.setDecimals(1)
self.modelblurEdit.setRange(0, 10)
self.modelblurEdit.setValue(0.5)
self.modelblurEdit.setSingleStep(0.1)
self.pixelsizeEdit = QtGui.QSpinBox()
self.pixelsizeEdit.setRange(1,999)
self.pixelsizeEdit.setValue(130)
model_groupbox = QtGui.QGroupBox('Model')
modelgrid = QtGui.QGridLayout(model_groupbox)
modelgrid.addWidget(self.modelchk,0,0)
modelgrid.addWidget(QtGui.QLabel('X-Coordinates'),1,0)
modelgrid.addWidget(self.model_x,1,1)
modelgrid.addWidget(QtGui.QLabel('Y-Coordinates'),2,0)
modelgrid.addWidget(self.model_y,2,1)
modelgrid.addWidget(QtGui.QLabel('Z-Coordinates'),3,0)
modelgrid.addWidget(self.model_z,3,1)
modelgrid.addWidget(QtGui.QLabel('Blur:'),4, 0)
modelgrid.addWidget(self.modelblurEdit, 4, 1)
modelgrid.addWidget(QtGui.QLabel('Pixelsize:'),5,0)
modelgrid.addWidget(self.pixelsizeEdit, 5, 1)
modelgrid.addWidget(self.model_preview_btn, 6 ,0)
modelgrid.addWidget(self.modelchk, 6, 1)
buttongrid.addWidget(model_groupbox)
mainWidget = QtGui.QWidget()
mainWidget.setLayout(self.grid)
self.setCentralWidget(mainWidget)
self.status_bar.showMessage('Average3 ready.')
def open(self):
path = QtGui.QFileDialog.getOpenFileName(self, 'Open localizations', filter='*.hdf5')
if path:
self.add(path)
def save(self, path):
n_channels = len(self.locs)
for i in range(n_channels):
cx = self.infos[i][0]['Width'] / 2
cy = self.infos[i][0]['Height'] / 2
out_locs = self.locs[i].copy()
out_locs.x += cx
out_locs.y += cy
info = self.infos[i] + [{'Generated by': 'Picasso Average3'}]
if not self.z_state[i]:
out_locs = lib.remove_from_rec(out_locs, 'z')
out_path = os.path.splitext(self.locs_paths[i])[0] + '_avg3.hdf5'
path = QtGui.QFileDialog.getSaveFileName(self, 'Save localizations', out_path, filter='*.hdf5')
io.save_locs(path, out_locs, info)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == '.hdf5':
print('Opening {} ..'.format(path))
self.add(path)
def add(self, path, rendermode=True):
try:
locs, info = io.load_locs(path, qt_parent=self)
except io.NoMetadataFileError:
return
if len(self.locs) == 0:
self.pixelsize = 0
if not hasattr(locs, 'group'):
msgBox = QtGui.QMessageBox(self)
msgBox.setWindowTitle('Error')
msgBox.setText('Datafile does not contain group information. Please load file with picked localizations.')
msgBox.exec_()
else:
locs = lib.ensure_sanity(locs, info)
if not hasattr(locs, 'z'):
locs = lib.append_to_rec(locs, locs.x.copy(), 'z')
self.pixelsize = 1
has_z = False
else:
has_z = True
if self.pixelsize == 0:
pixelsize,ok = QtGui.QInputDialog.getInt(self,"Pixelsize Dialog","Please enter the pixelsize in nm", 130)
if ok:
self.pixelsize = pixelsize
else:
self.pixelsize = 130
self.locs.append(locs)
self.z_state.append(has_z)
self.infos.append(info)
self.locs_paths.append(path)
self.index_blocks.append(None)
self._drift.append(None)
self.dataset_dialog.add_entry(path)
self.dataset_dialog.checks[-1].stateChanged.connect(self.updateLayout)
cx = self.infos[-1][0]['Width'] / 2
cy = self.infos[-1][0]['Height'] / 2
self.locs[-1].x -= cx
self.locs[-1].y -= cy
if len(self.locs) == 1:
self.median_lp = np.mean([np.median(locs.lpx), np.median(locs.lpy)])
if hasattr(locs, 'group'):
groups = np.unique(locs.group)
groupcopy = locs.group.copy()
for i in range(len(groups)):
groupcopy[locs.group == groups[i]] = i
np.random.shuffle(groups)
groups %= N_GROUP_COLORS
self.group_color = groups[groupcopy]
if rendermode:
self.fit_in_view(autoscale=True)
else:
if rendermode:
self.update_scene()
self.oversampling = 1
if len(self.locs) == 1:
self.t_min = np.min([np.min(locs.x),np.min(locs.y)])
self.t_max = np.max([np.max(locs.x),np.max(locs.y)])
self.z_min = np.min(locs.z)
self.z_max = np.max(locs.z)
else:
self.t_min = np.min([np.min(locs.x),np.min(locs.y),self.t_min])
self.t_max = np.max([np.max(locs.x),np.max(locs.y),self.t_max])
self.z_min = np.min([np.min(locs.z),self.z_min])
self.z_max = np.max([np.max(locs.z), self.z_max])
if len(self.locs) == 1:
print('Dataset loaded from {}.'.format(path))
else:
print('Dataset loaded from {}, Total number of datasets {}.'.format(path, len(self.locs)))
#CREATE GROUP INDEX
if hasattr(locs, 'group'):
groups = np.unique(locs.group)
n_groups = len(groups)
n_locs = len(locs)
group_index = scipy.sparse.lil_matrix((n_groups, n_locs), dtype=bool)
progress = lib.ProgressDialog('Creating group index', 0, len(groups), self)
progress.set_value(0)
for i, group in enumerate(groups):
index = np.where(locs.group == group)[0]
group_index[i, index] = True
progress.set_value(i+1)
self.group_index.append(group_index)
self.n_groups = n_groups
os.chdir(os.path.dirname(path))
self.calculate_radii()
self.oversampling = 4
self.updateLayout()
def updateLayout(self):
if len(self.locs) > 0:
pixmap1, pixmap2, pixmap3 = self.hist_multi_channel(self.locs)
self.viewxy.setPixmap(pixmap1)
self.viewxz.setPixmap(pixmap2)
self.viewyz.setPixmap(pixmap3)
def centerofmass_all(self):
#Align all by center of mass
n_channels = len(self.locs)
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
#stack arrays
sel_locs_x = self.locs[j].x
sel_locs_y = self.locs[j].y
sel_locs_z = self.locs[j].z
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
out_locs_x=stack_arrays(out_locs_x, asrecarray=True, usemask=False)
out_locs_y=stack_arrays(out_locs_y, asrecarray=True, usemask=False)
out_locs_z=stack_arrays(out_locs_z, asrecarray=True, usemask=False)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
self.locs[j].x -= mean_x
self.locs[j].y -= mean_y
self.locs[j].z -= mean_z
def calculate_radii(self):
#CALCULATE PROPER R VALUES
n_channels = len(self.locs)
self.r = 0
self.r_z = 0
for j in range(n_channels):
self.r = np.max([3 * np.sqrt(np.mean(self.locs[j].x**2 + self.locs[j].y**2)),self.r])
self.r_z = np.max([5 * np.sqrt(np.mean(self.locs[j].z**2)),self.r_z])
self.t_min = -self.r
self.t_max = self.r
self.z_min = -self.r_z
self.z_max = self.r_z
self.z_min_load = self.z_min.copy()
self.z_max_load = self.z_max.copy()
def centerofmass(self):
print('Aligning by center of mass.. ', end='', flush=True)
n_groups = self.n_groups
n_channels = len(self.locs)
progress = lib.ProgressDialog('Aligning by center of mass', 0, n_groups, self)
progress.set_value(0)
for i in range(n_groups):
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
index = self.group_index[j][i, :].nonzero()[1]
# stack arrays
sel_locs_x = self.locs[j].x[index]
sel_locs_y = self.locs[j].y[index]
sel_locs_z = self.locs[j].z[index]
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
progress.set_value(i+1)
out_locs_x = stack_arrays(out_locs_x, asrecarray=True, usemask=False)
out_locs_y = stack_arrays(out_locs_y, asrecarray=True, usemask=False)
out_locs_z = stack_arrays(out_locs_z, asrecarray=True, usemask=False)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
index = self.group_index[j][i, :].nonzero()[1]
self.locs[j].x[index] -= mean_x
self.locs[j].y[index] -= mean_y
self.locs[j].z[index] -= mean_z
self.calculate_radii()
self.updateLayout()
print('Complete.')
def histtoImage(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap('magma')(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype('uint8')
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order='C')
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
qimage = qimage.scaled(self.viewxy.width(), np.round(self.viewxy.height()*Y/X), QtCore.Qt.KeepAspectRatioByExpanding)
pixmap = QtGui.QPixmap.fromImage(qimage)
return pixmap
def hist_multi_channel(self, locs):
oversampling = self.parameters_dialog.oversampling.value()
self.oversampling = oversampling
if locs is None:
locs = self.locs
n_channels = len(locs)
hues = np.arange(0, 1, 1 / n_channels)
colors = [colorsys.hsv_to_rgb(_, 1, 1) for _ in hues]
renderings = []
for i in range(n_channels):
if self.dataset_dialog.checks[i].isChecked():
renderings.append(render.render_hist3d(locs[i], oversampling, self.t_min, self.t_min, self.t_max, self.t_max, self.z_min, self.z_max, self.pixelsize))
n_locs = sum([_[0] for _ in renderings])
images = np.array([_[1] for _ in renderings])
pixmap1 = self.pixmap_from_colors(images,colors,2)
pixmap2 = self.pixmap_from_colors(images,colors,0)
pixmap3 = self.pixmap_from_colors(images,colors,1)
return pixmap1, pixmap2, pixmap3
def pixmap_from_colors(self,images,colors,axisval):
if axisval == 2:
image = [np.sum(_, axis=axisval) for _ in images]
else:
image = [np.transpose(np.sum(_, axis=axisval)) for _ in images]
image = np.array([self.scale_contrast(_) for _ in image])
Y, X = image.shape[1:]
bgra = np.zeros((Y, X, 4), dtype=np.float32)
for color, image in zip(colors, image):
bgra[:, :, 0] += color[2] * image
bgra[:, :, 1] += color[1] * image
bgra[:, :, 2] += color[0] * image
bgra = np.minimum(bgra, 1)
self._bgra = self.to_8bit(bgra)
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
qimage = qimage.scaled(self.viewxy.width(), np.round(self.viewxy.height()*Y/X), QtCore.Qt.KeepAspectRatioByExpanding)
pixmap = QtGui.QPixmap.fromImage(qimage)
return pixmap
def align_x(self):
print('Align X')
self.align_all('x')
def align_y(self):
print('Align Y')
self.align_all('y')
def align_zz(self):
print('Align Z')
self.align_all('zz')
def align_zy(self):
print('Align Z')
self.align_all('zy')
def translate_x(self):
print('Translate X')
self.translate('x')
def translate_y(self):
print('Translate Y')
self.translate('y')
def translate_z(self):
print('Translate Z')
self.translate('z')
def translate(self, translateaxis):
renderings = [render.render_hist3d(_, self.oversampling, self.t_min, self.t_min, self.t_max, self.t_max, self.z_min, self.z_max, self.pixelsize) for _ in self.locs]
n_locs = sum([_[0] for _ in renderings])
images = np.array([_[1] for _ in renderings])
if translateaxis == 'x':
image = [np.sum(_, axis=2) for _ in images]
signalimg = [np.sum(_, axis=0) for _ in image]
elif translateaxis == 'y':
image = [np.sum(_, axis=2) for _ in images]
signalimg = [np.sum(_, axis=1) for _ in image]
elif translateaxis == 'z':
image = [np.sum(_, axis=1) for _ in images]
signalimg = [np.sum(_, axis=0) for _ in image]
fig = plt.figure(figsize =(5,5))
ax1 = fig.add_subplot(1, 1 ,1)
for element in signalimg:
plt.plot(element)
n_groups = self.group_index[0].shape[0]
print('Translating..')
for i in tqdm(range(n_groups)):
self.status_bar.showMessage('Group {} / {}.'.format(i, n_groups))
self.translate_group(signalimg, i, translateaxis)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
self.centerofmass_all()
self.updateLayout()
self.status_bar.showMessage('Done!')
def translate_group(self, signalimg, group, translateaxis):
n_channels = len(self.locs)
all_xcorr = np.zeros((1, n_channels))
all_da = np.zeros((1, n_channels))
if translateaxis == 'x':
proplane = 'xy'
elif translateaxis == 'y':
proplane = 'xy'
elif translateaxis == 'z':
proplane = 'xz'
plotmode = 0
for j in range(n_channels):
if plotmode:
fig = plt.figure()
ax1 = fig.add_subplot(1, 3, 1)
plt.plot(signalimg[j])
ax2 = fig.add_subplot(1, 3, 2)
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
xcorr_max = 0.0
plane = self.render_planes(x_rot, y_rot, z_rot, proplane, self.pixelsize) #
if translateaxis == 'x':
projection = np.sum(plane, axis=0)
elif translateaxis == 'y':
projection = np.sum(plane, axis=1)
elif translateaxis == 'z':
projection = np.sum(plane, axis=1)
if plotmode:
plt.plot(projection)
#print('Step X')
#ax3 = fig.add_subplot(1,3,3)
#plt.imshow(plane, interpolation='nearest', cmap=plt.cm.ocean)
corrval = np.max(signal.correlate(signalimg[j],projection))
shiftval = np.argmax(signal.correlate(signalimg[j], projection))-len(signalimg[j])+1
all_xcorr[0,j] = corrval
all_da[0,j] = shiftval/self.oversampling
if plotmode:
plt.show()
# value with biggest cc value from table
maximumcc = np.argmax(np.sum(all_xcorr,axis = 1))
dafinal = np.mean(all_da[maximumcc,:])
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
if translateaxis == 'x':
self.locs[j].x[index] += dafinal
elif translateaxis == 'y':
self.locs[j].y[index] += dafinal
elif translateaxis == 'z':
self.locs[j].z[index] += dafinal*self.pixelsize
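# Shift estimation above: for two 1-D profiles of equal length n,
# signal.correlate returns 2n-1 lags with zero lag at index n-1, so
# argmax(corr) - n + 1 is the signed displacement in oversampled pixels;
# dividing by self.oversampling converts it back to camera pixels
# (and translate_group scales by pixelsize for z).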
def adjust_z(self):
z_range_str = np.asarray((self.z_range.text()).split(","))
z_range = []
for element in z_range_str:
try:
z_range.append(float(element))
except ValueError:
pass
z_min = z_range[0]
z_max = z_range[1]
self.z_min = np.max([z_min, self.z_min_load])
self.z_max = np.min([z_max, self.z_max_load])
print('Z min {}, Z max {}'.format(self.z_min, self.z_max))
self.updateLayout()
def adjust_xy(self):
x_range_str = np.asarray((self.x_range.text()).split(","))
x_range = []
for element in x_range_str:
try:
x_range.append(float(element))
except ValueError:
pass
x_min = x_range[0]
x_max = x_range[1]
self.x_min = np.max([x_min, self.t_min])
self.x_max = np.min([x_max, self.t_max])
print('X min {}, X max {}'.format(self.x_min, self.x_max))
y_range_str = np.asarray((self.y_range.text()).split(","))
y_range = []
for element in y_range_str:
try:
y_range.append(float(element))
except ValueError:
pass
y_min = y_range[0]
y_max = y_range[1]
self.y_min = np.max([y_min, self.t_min])
self.y_max = np.min([y_max, self.t_max])
print('Y min {}, Y max {}'.format(self.y_min, self.y_max))
self.updateLayout()
def rotatexy_convolution_group(self, CF_image_avg, angles, group, rotaxis, proplane):
n_channels = len(self.locs)
allrot = []
alldx = []
alldy = []
alldz = []
n_angles = len(angles)
all_xcorr = np.zeros((n_angles,n_channels))
all_da = np.zeros((n_angles,n_channels))
all_db = np.zeros((n_angles,n_channels))
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
xcorr_max = 0.0
if self.translatebtn.isChecked():
angles = [0]
n_angles = 1
for k in range(n_angles):
angle = angles[k]
# rotate locs
x_rot, y_rot, z_rot = rotate_axis(rotaxis, x_original, y_original, z_original, angle, self.pixelsize)
# render group image for plane
image = self.render_planes(x_rot, y_rot, z_rot, proplane, self.pixelsize)
# calculate cross-correlation
if 0:
fig = plt.figure()
ax1 = fig.add_subplot(1,2,1)
ax1.set_aspect('equal')
plt.imshow(image, interpolation='nearest', cmap=plt.cm.ocean)
plt.colorbar()
plt.show()
plt.waitforbuttonpress()
xcorr = np.sum(np.multiply(CF_image_avg[j], image))
all_xcorr[k,j] = xcorr
# value with biggest cc value from table
maximumcc = np.argmax(np.sum(all_xcorr,axis = 1))
rotfinal = angles[maximumcc]
dafinal = np.mean(all_da[maximumcc,:])
dbfinal = np.mean(all_db[maximumcc,:])
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
# rotate and shift image group locs
x_rot, y_rot, z_rot = rotate_axis(rotaxis, x_original, y_original, z_original, rotfinal, self.pixelsize)
self.locs[j].x[index] = x_rot
self.locs[j].y[index] = y_rot
self.locs[j].z[index] = z_rot
def rotatexy_convolution(self):
#TODO: re-write this with kwargs at some point
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = 'x'
elif self.y_axisbtn.isChecked():
rotaxis = 'y'
elif self.z_axisbtn.isChecked():
rotaxis = 'z'
n_groups = self.group_index[0].shape[0]
a_step = np.arcsin(1 / (self.oversampling * self.r))
if self.full_degbtn.isChecked():
angles = np.arange(0, 2*np.pi, a_step)
elif self.part_degbtn.isChecked():
degree = self.degEdit.value()
angles = np.arange(-degree/360*2*np.pi, degree/360*2*np.pi, a_step)
renderings = [render.render_hist3d(_, self.oversampling, self.t_min, self.t_min, self.t_max, self.t_max, self.z_min, self.z_max, self.pixelsize) for _ in self.locs]
n_locs = sum([_[0] for _ in renderings])
images = np.array([_[1] for _ in renderings])
#DELIVER CORRECT PROJECTION FOR IMAGE
proplane = []
if self.xy_projbtn.isChecked():
proplane = 'xy'
image = [np.sum(_, axis=2) for _ in images]
elif self.yz_projbtn.isChecked():
proplane = 'yz'
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif self.xz_projbtn.isChecked():
proplane = 'xz'
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
#Change CF image for symmetry
if self.radio_sym.isChecked():
print('Using symmetry.')
fig = plt.figure(figsize =(5,5))
ax1 = fig.add_subplot(1,2,1)
symmetry = self.symEdit.value()
ax1.set_aspect('equal')
imageold = image[0].copy()
plt.imshow(imageold, interpolation='nearest', cmap=plt.cm.ocean)
#rotate image
for i in range(symmetry-1):
image[0] += scipy.ndimage.interpolation.rotate(imageold,((i+1)*360/symmetry) , axes=(1, 0),reshape=False)
ax2 = fig.add_subplot(1,2,2)
ax2.set_aspect('equal')
plt.imshow(image[0], interpolation='nearest', cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.radio_sym_custom.isChecked():
print('Using custom symmetry.')
symmetry_txt = np.asarray((self.symcustomEdit.text()).split(','))
print(symmetry_txt)
fig = plt.figure(figsize =(5,5))
ax1 = fig.add_subplot(1,2,1)
symmetry = self.symEdit.value()
ax1.set_aspect('equal')
imageold = image[0].copy()
plt.imshow(imageold, interpolation='nearest', cmap=plt.cm.ocean)
#rotate image
for degree in symmetry_txt:
image[0] += scipy.ndimage.interpolation.rotate(imageold, float(degree) , axes=(1, 0),reshape=False)
ax2 = fig.add_subplot(1,2,2)
ax2.set_aspect('equal')
plt.imshow(image[0], interpolation='nearest', cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.modelchk.isChecked():
self.generate_template()
image[0] = self.template_img
CF_image_avg = image
# TODO: blur on average !!!
print('Convolving..')
for i in tqdm(range(n_groups)):
self.status_bar.showMessage('Group {} / {}.'.format(i,n_groups))
self.rotatexy_convolution_group(CF_image_avg, angles, i, rotaxis, proplane)
self.updateLayout()
self.status_bar.showMessage('Done!')
def rotate_groups(self):
#Read out values from radiobuttons
#TODO: maybe re-write this with kwargs
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = 'x'
elif self.y_axisbtn.isChecked():
rotaxis = 'y'
elif self.z_axisbtn.isChecked():
rotaxis = 'z'
n_groups = self.group_index[0].shape[0]
a_step = np.arcsin(1 / (self.oversampling * self.r))
if self.full_degbtn.isChecked():
angles = np.arange(0, 2*np.pi, a_step)
elif self.part_degbtn.isChecked():
degree = self.degEdit.value()
angles = np.arange(-degree/360*2*np.pi, degree/360*2*np.pi, a_step)
renderings = [render.render_hist3d(_, self.oversampling, self.t_min, self.t_min, self.t_max, self.t_max, self.z_min, self.z_max, self.pixelsize) for _ in self.locs]
n_locs = sum([_[0] for _ in renderings])
images = np.array([_[1] for _ in renderings])
#DELIVER CORRECT PROJECTION FOR IMAGE
proplane = []
if self.xy_projbtn.isChecked():
proplane = 'xy'
image = [np.sum(_, axis=2) for _ in images]
elif self.yz_projbtn.isChecked():
proplane = 'yz'
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif self.xz_projbtn.isChecked():
proplane = 'xz'
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
if self.radio_sym.isChecked():
print('Radio sym')
fig = plt.figure(figsize = (5,5))
ax1 = fig.add_subplot(1,2,1)
symmetry = self.symEdit.value()
ax1.set_aspect('equal')
imageold = image[0].copy()
plt.imshow(imageold, interpolation='nearest', cmap=plt.cm.ocean)
#rotate image
for i in range(symmetry-1):
image[0] += scipy.ndimage.interpolation.rotate(imageold,((i+1)*360/symmetry) , axes=(1, 0),reshape=False)
ax2 = fig.add_subplot(1,2,2)
ax2.set_aspect('equal')
plt.imshow(image[0], interpolation='nearest', cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
#TODO: Sort these functions out, combine with radio_sym / also for convolving.
if self.radio_sym_custom.isChecked():
print('Using custom symmetry.')
symmetry_txt = np.asarray((self.symcustomEdit.text()).split(','))
fig = plt.figure(figsize =(5,5))
ax1 = fig.add_subplot(1,2,1)
symmetry = self.symEdit.value()
ax1.set_aspect('equal')
imageold = image[0].copy()
plt.imshow(imageold, interpolation='nearest', cmap=plt.cm.ocean)
#rotate image
for degree in symmetry_txt:
image[0] += scipy.ndimage.interpolation.rotate(imageold, float(degree) , axes=(1, 0),reshape=False)
ax2 = fig.add_subplot(1,2,2)
ax2.set_aspect('equal')
plt.imshow(image[0], interpolation='nearest', cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.modelchk.isChecked():
self.generate_template()
image[0] = self.template_img
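# Precompute the conjugate FFT of the average image: by the correlation
# theorem, cross-correlating each group rendering against the average then
# reduces to a pointwise product in Fourier space (consumed by align_group).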
CF_image_avg = [np.conj(np.fft.fft2(_)) for _ in image]
#n_pixel, _ = image_avg.shape
#image_half = n_pixel / 2
# TODO: blur the average image!
print('Rotating..')
for i in tqdm(range(n_groups)):
self.status_bar.showMessage('Group {} / {}.'.format(i,n_groups))
self.align_group(CF_image_avg, angles, i, rotaxis, proplane)
self.updateLayout()
self.status_bar.showMessage('Done!')
def getUIstate(self):
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = 'x'
elif self.y_axisbtn.isChecked():
rotaxis = 'y'
elif self.z_axisbtn.isChecked():
rotaxis = 'z'
proplane = []
if self.xy_projbtn.isChecked():
proplane = 'xy'
elif self.yz_projbtn.isChecked():
proplane = 'yz'
elif self.xz_projbtn.isChecked():
proplane = 'xz'
return rotaxis, proplane
def projectPlanes(self, images, proplane):
if proplane == 'xy':
image = [np.sum(_, axis=2) for _ in images]
elif proplane == 'yz':
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif proplane == 'xz':
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
return image
def generate_template(self):
model_x_str = np.asarray((self.model_x.text()).split(","))
model_y_str = np.asarray((self.model_y.text()).split(","))
model_z_str = np.asarray((self.model_z.text()).split(","))
model_x = []
model_y = []
model_z = []
for element in model_x_str:
try:
model_x.append(float(element))
except ValueError:
pass
for element in model_y_str:
try:
model_y.append(float(element))
except ValueError:
pass
for element in model_z_str:
try:
model_z.append(float(element))
except ValueError:
pass
pixelsize = self.pixelsizeEdit.value()
blur = self.modelblurEdit.value()
# Center of mass
model_x = np.array(model_x)/pixelsize
model_y = np.array(model_y)/pixelsize
model_z = np.array(model_z)
model_x = model_x - np.mean(model_x)
model_y = model_y - np.mean(model_y)
model_z = model_z - np.mean(model_z)
rotaxis, proplane = self.getUIstate()
template_img = self.render_planes(model_x, model_y, model_z, proplane, pixelsize)
self.template_img = scipy.ndimage.gaussian_filter(template_img, blur)
def model_preview(self):
self.generate_template()
#Generate a template image
fig = plt.figure()
plt.title('Preview of Template')
plt.imshow(self.template_img, interpolation='nearest', cmap =plt.cm.hot)
plt.show()
def calculate_score(self):
#Dummy button -> Functionality of rotatebtn for now
#TODO: maybe re-write this with kwargs
self.scores = []
rotaxis, proplane = self.getUIstate()
n_groups = self.group_index[0].shape[0]
renderings = [render.render_hist3d(_, self.oversampling, self.t_min, self.t_min, self.t_max, self.t_max, self.z_min, self.z_max, self.pixelsize) for _ in self.locs]
n_locs = sum([_[0] for _ in renderings])
#Make an average and not a sum image here..
images = np.array([_[1]/n_groups for _ in renderings])
#DELIVER CORRECT PROJECTION FOR IMAGE
image = self.projectPlanes(images, proplane)
n_channels = len(image)
print('Calculating score..')
for i in tqdm(range(n_groups)):
channel_score = []
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][i].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
groupimage = self.render_planes(x_rot, y_rot, z_rot, proplane, self.pixelsize)
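# Bhattacharyya-style overlap between the group rendering and the average
# image, normalized by the group's own mass (sum of sqrt(g*g) = sum(g) for
# nonnegative histograms), so a perfect match scores close to 1.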
score = np.sum(np.sqrt(groupimage*image[j]))/np.sum(np.sqrt(groupimage*groupimage))
channel_score.append(score)
self.scores.append(channel_score)
self.status_bar.showMessage('Group {} / {}.'.format(i,n_groups))
self.status_bar.showMessage('Done. Average score: {}'.format(np.mean(self.scores)))
plt.hist(np.array(self.scores), 40)
plt.title('Histogram of Scores, Mean: {:.2f}'.format(np.mean(self.scores)))
plt.xlabel('Score')
plt.ylabel('Counts')
plt.show()
def mean_angle(self, deg):
# Circular mean of angles (in radians) via unit phasors; `rect` and
# `phase` come from the standard-library cmath module.
return phase(sum(rect(1, d) for d in deg) / len(deg))
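# Example: mean_angle([0.1, 2*np.pi - 0.1]) is ~0.0, whereas a naive
# arithmetic mean of the same angles would give ~pi.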
def render_planes(self, xdata, ydata, zdata, proplane, pixelsize):
#assign correct renderings for all planes
a_render = []
b_render = []
if proplane == 'xy':
a_render = xdata
b_render = ydata
aval_min = self.t_min
aval_max = self.t_max
bval_min = self.t_min
bval_max = self.t_max
elif proplane == 'yz':
a_render = ydata
b_render = np.divide(zdata,pixelsize)
aval_min = self.t_min
aval_max = self.t_max
bval_min = np.divide(self.z_min,pixelsize)
bval_max = np.divide(self.z_max,pixelsize)
elif proplane == 'xz':
b_render = np.divide(zdata, pixelsize)
a_render = xdata
bval_min = np.divide(self.z_min,pixelsize)
bval_max = np.divide(self.z_max,pixelsize)
aval_min = self.t_min
aval_max = self.t_max
N, plane = render_histxyz(a_render, b_render, self.oversampling, aval_min, aval_max, bval_min, bval_max)
return plane
def align_all(self, alignaxis):
a_step = np.arcsin(1 / (self.oversampling * self.r))
angles = np.arange(0, 2*np.pi, a_step)
n_channels = len(self.locs)
allrot = []
n_angles = len(angles)
all_corr = np.zeros((n_angles,n_channels))
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
x_rot = self.locs[j].x
y_rot = self.locs[j].y
z_rot = self.locs[j].z
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
alignimage = []
for k in range(n_angles):
angle = angles[k]
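# Map the requested alignment axis to a projection plane and rotation
# axis; 'zz' and 'zy' share the yz plane and x rotation axis but build
# different templates below.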
if alignaxis == 'zz':
proplane = 'yz'
rotaxis = 'x'
elif alignaxis == 'zy':
proplane = 'yz'
rotaxis = 'x'
elif alignaxis == 'y':
proplane = 'xy'
rotaxis = 'z'
elif alignaxis == 'x':
proplane = 'xy'
rotaxis = 'z'
x_rot, y_rot, z_rot = rotate_axis(rotaxis, x_original, y_original, z_original, angle,self.pixelsize)
# render group image for plane
image = self.render_planes(x_rot, y_rot, z_rot, proplane, self.pixelsize) # render_planes was buggy at some point
if len(alignimage) == 0:
alignimage = np.zeros(image.shape)
#CREATE ALIGNIMAGE
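# The template is a 3-pixel-wide ridge (weights 1, 2, 1) through the image
# center along the target axis; the rotation that correlates best with
# this ridge aligns the structure with that axis.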
if alignaxis == 'zz':
alignimage[int(alignimage.shape[0]/2),:] += 2
alignimage[int(alignimage.shape[0]/2)+1,:] += 1
alignimage[int(alignimage.shape[0]/2)-1,:] += 1
elif alignaxis == 'zy':
alignimage[:,int(alignimage.shape[0]/2)] += 2
alignimage[:,int(alignimage.shape[0]/2)+1] += 1
alignimage[:,int(alignimage.shape[0]/2)-1] += 1
elif alignaxis == 'y':
alignimage[:,int(alignimage.shape[1]/2)] += 2
alignimage[:,int(alignimage.shape[1]/2)-1] += 1
alignimage[:,int(alignimage.shape[1]/2)+1] += 1
elif alignaxis == 'x':
alignimage[int(alignimage.shape[0]/2),:] += 2
alignimage[int(alignimage.shape[0]/2)+1,:] += 1
alignimage[int(alignimage.shape[0]/2)-1,:] += 1
import random
from collections import namedtuple
from enum import IntEnum
from pathlib import Path
from typing import Sequence, Union
import numpy as np
from PIL import Image
from math_utils import Vec2, Vec3, apply_weights
from tiny_renderer.bitmap import Bitmap
from tiny_renderer.model import Model
Color = namedtuple("Color", "r g b a")
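# 8-bit RGBA color; e.g. Color(255, 0, 0, 255) is opaque red.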
class Colors:
White = Color(255, 255, 255, 255)
Red = Color(255, 0, 0, 255)
Green = Color(0, 255, 0, 255)
Blue = Color(0, 0, 255, 255)
class LightingMode(IntEnum):
"""
Lighting mode for `TinyRenderer`
"""
Smooth = 0
Flat = 1
@classmethod
def get_caption(cls, index):
captions = {
cls.Smooth: "Smooth",
cls.Flat: "Flat",
}
return captions[index]
@classmethod
def get_captions(cls):
return [LightingMode.get_caption(i) for i in LightingMode]
class RenderingMode(IntEnum):
"""
Possible rendering modes for `TinyRenderer`
"""
Wireframe = 0
RandomColors = 1
Texturized = 2
LightOnly = 3
@classmethod
def get_caption(cls, index):
captions = {
cls.Wireframe: "Wireframe",
cls.RandomColors: "Random color",
cls.Texturized: "Texturized",
cls.LightOnly: "Light only",
}
return captions[index]
@classmethod
def get_captions(cls):
return [RenderingMode.get_caption(i) for i in RenderingMode]
class TinyRenderer:
"""
My own version of the original Tiny Renderer:
https://github.com/ssloy/tinyrenderer/wiki
"""
def __init__(self, *, bind_texture=True):
"""
:param bind_texture:
If `True`, `TinyRenderer` will create a `tiny_renderer.bitmap.Bitmap` instance
binding any rendered image to an OpenGL texture. This can be disabled for tests
so they don't need to initialize an OpenGL context.
"""
self._height = 800
self._width = 800
self._depth = 800
self._scale_x = 1.0
self._scale_y = 1.0
self._scale_z = 1.0
self._image = np.zeros((self._height, self._width, 3), np.uint8)
import numpy as np
import pandas as pd
import scipy.integrate as integrate
import scipy.optimize as optimize
def simulate_JM_base(I, obstime, miss_rate=0.1, opt="none", seed=None):
if seed is not None:
np.random.seed(seed)
J = len(obstime)
#### longitudinal submodel ####
beta0 = np.array([1.5, 2, 0.5])
beta1 = np.array([2,-1,1])
betat = np.array([1.5, -1, 0.6])
b_var = np.array([1,1.5,2])
e_var = np.array([1,1,1])
rho = np.array([-0.2,0.1,-0.3])
b_Sigma = np.diag(b_var)
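# Random-effect covariance: start from diag(variances) and fill the
# off-diagonals with cov_ij = sqrt(var_i * var_j) * rho_ij.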
b_Sigma[0,1] = b_Sigma[1,0] = np.sqrt(b_var[0]*b_var[1])*rho[0]
b_Sigma[0,2] = b_Sigma[2,0] = np.sqrt(b_var[0]*b_var[2])*rho[1]
b_Sigma[1,2] = b_Sigma[2,1] = np.sqrt(b_var[1]*b_var[2])*rho[2]
X = np.random.normal(3,1,size=I)
ranef = np.random.multivariate_normal(mean=[0,0,0], cov=b_Sigma, size=I)
mean_long = beta0 + np.outer(X,beta1)
eta_long = mean_long + ranef
if opt=="none" or opt=="nonph":
gamma = np.array([-4,-2])
alpha = np.array([0.2,-0.2,0.4])
x1 = np.random.binomial(n=1,p=0.5,size=I)
x2 = np.random.normal(size=I)
W = np.stack((x1,x2), axis=1)
eta_surv = W@gamma + eta_long@alpha
base = W[...,np.newaxis]
if opt=="interaction":
gamma = np.array([-4,-2,3])
alpha = np.array([0.2,-0.2,0.4])
x1 = np.random.binomial(n=1,p=0.5,size=I)
x2 = np.random.normal(size=I)
x3 = x1*x2
W = np.stack((x1,x2,x3), axis=1)
eta_surv = W@gamma + eta_long@alpha
base = np.stack((x1,x2), axis=1)
base = base[...,np.newaxis]
#Simulate Survival Times using Inverse Sampling Transform
scale = np.exp(-7)
U = np.random.uniform(size=I)
alpha_beta = alpha@betat
# Note: despite its name, CHF(tau) returns the survival probability
# S(tau) = exp(-integral_0^tau h(t) dt); the hazard h is integrated numerically.
def CHF(tau):
def h(t):
if opt=="none" or opt=="interaction":
return scale * np.exp(eta_surv[i] + alpha_beta*t)
if opt=="nonph":
return scale * np.exp(eta_surv[i] + 3*x2[i]*np.sin(t) + alpha_beta*t)
return np.exp(-1 * integrate.quad(lambda xi: h(xi),0,tau)[0])
Ti = np.empty(I)
Ti[:] = np.nan
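# Inverse transform sampling: each survival time solves U_i = S_i(t)
# for t via Brent's root finding on [0, 100].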
for i in range(0,I):
Ti[i] = optimize.brentq(lambda xi: U[i]-CHF(xi), 0, 100)
#Get true survival probabilities
true_prob = np.ones((I, len(obstime)))
for i in range(0,I):
for j in range(1,len(obstime)):
tau = obstime[j]
true_prob[i,j] = CHF(tau)
C = np.random.uniform(low=obstime[3], high=obstime[-1]+25, size=I)
C = np.minimum(C, obstime[-1])
event = Ti<C
true_time = np.minimum(Ti, C)
# round true_time up to nearest obstime
time = [np.min([obs for obs in obstime if obs-t>=0]) for t in true_time]
subj_obstime = np.tile(obstime, reps=I)
pred_time = np.tile(obstime, reps=I)
mean_long = np.repeat(mean_long, repeats=J, axis=0)
eta_long = np.repeat(eta_long, repeats=J, axis=0)
long_err = np.random.multivariate_normal(mean=[0,0,0], cov=np.diag(e_var), size=I*J)
Y = np.empty((I*J,3))
Y_pred = np.empty((I*J,3))
for i in range(0,3):
Y[:,i] = eta_long[:,i] + betat[i]*subj_obstime + long_err[:,i]
Y_pred[:,i] = eta_long[:,i] + betat[i]*pred_time + long_err[:,i]
true_prob = true_prob.flatten()
ID = np.repeat(range(0,I), repeats=J)
visit = np.tile(range(0,J), reps=I)
data = pd.DataFrame({"id":ID, "visit":visit, "obstime":subj_obstime, "predtime":pred_time,
"time":np.repeat(time,repeats=J),
"event": | np.repeat(event,repeats=J) | numpy.repeat |
import numpy as np
from graphbutler import recipe, save_all, Graph, Parameterized
@recipe
def sine_waves():
g = Graph()
g.x = np.arange(0.0, 10.0, 0.01)
g.y = Parameterized("A", lambda A: A * np.sin(g.x))
import scipy as sp
import numpy as np
from scipy.stats import lognorm as dist
from ngboost.distns import SurvivalDistn
from ngboost.scores import LogScore, CRPScore
class LogNormalLogScore(LogScore):
def score(self, Y):
E = Y['Event']
T = Y['Time']
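# Censored survival likelihood: censored observations (E == 0) contribute
# log S(T) = log(1 - F(T)); uncensored ones contribute the log density.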
cens = (1-E) * np.log(1 - self.dist.cdf(T) + self.eps)
uncens = E * self.dist.logpdf(T)
return -(cens + uncens)
def d_score(self, Y):
E = Y['Event'][:,np.newaxis]
T = Y['Time']
lT = np.log(T)
Z = (lT - self.loc) / self.scale
D_uncens = np.zeros((self.loc.shape[0], 2))
D_uncens[:, 0] = (self.loc - lT) / (self.scale ** 2)
D_uncens[:, 1] = 1 - ((self.loc - lT) ** 2) / (self.scale ** 2)
D_cens = np.zeros((self.loc.shape[0], 2))
D_cens[:, 0] = -sp.stats.norm.pdf(lT, loc=self.loc, scale=self.scale) / \
(1 - self.dist.cdf(T) + self.eps)
D_cens[:, 1] = -Z * sp.stats.norm.pdf(lT, loc=self.loc, scale=self.scale) / \
(1 - self.dist.cdf(T) + self.eps)
return (1-E) * D_cens + E * D_uncens
def metric(self):
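# Fisher information of the lognormal w.r.t. (loc, log scale):
# diag(1/scale^2, 2) per observation.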
FI = np.zeros((self.loc.shape[0], 2, 2))
FI[:, 0, 0] = 1/(self.scale ** 2) + self.eps
FI[:, 1, 1] = 2
return FI
class LogNormalCRPScore(CRPScore):
def score(self, Y):
E = Y["Event"]
T = Y["Time"]
lT = np.log(T)
Z = (lT - self.loc) / self.scale
crps_uncens = (self.scale * (Z * (2 * sp.stats.norm.cdf(Z) - 1) + \
2 * sp.stats.norm.pdf(Z) - 1 / np.sqrt(np.pi)))
crps_cens = self.scale * (Z * sp.stats.norm.cdf(Z) ** 2 + \
2 * sp.stats.norm.cdf(Z) * sp.stats.norm.pdf(Z) - \
sp.stats.norm.cdf(np.sqrt(2) * Z) / np.sqrt(np.pi))
return (1-E) * crps_cens + E * crps_uncens
def d_score(self, Y):
E = Y["Event"]
T = Y["Time"]
lT = np.log(T)
Z = (lT - self.loc) / self.scale
D = np.zeros((self.loc.shape[0], 2))
D[:, 0] = E * -(2 * sp.stats.norm.cdf(Z) - 1)
# add (not overwrite) the censored contribution
D[:, 0] += (1-E) * -(sp.stats.norm.cdf(Z) ** 2 + \
2 * Z * sp.stats.norm.cdf(Z) * sp.stats.norm.pdf(Z) + \
2 * sp.stats.norm.pdf(Z) ** 2 - \
2 * sp.stats.norm.cdf(Z) * sp.stats.norm.pdf(Z) ** 2 - \
np.sqrt(2/np.pi) * sp.stats.norm.pdf(np.sqrt(2) * Z))
D[:, 1] = self.score(Y) + (lT - self.loc) * D[:, 0] # score() is the CRPS defined above
return D
def metric(self):
I = 1 / (2 * np.sqrt(np.pi))
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
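# Cache each operation's transposed rotation (to map Miller indices) and
# its phase factor exp(-2*pi*i*t/td), used by symmetryEquivalentMillerIndices.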
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
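# Example usage (illustrative):
#   sg = space_groups['P 1']          # or, equivalently, space_groups[1]
#   hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#   len(sg)                           # number of symmetry operations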
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
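# --- annotation (not part of the generated tables) ---------------------------
# For centred settings (A-, I-, F-lattices) the generator emits one explicit
# copy of every operation per centring vector, adding the centring translation
# without reducing modulo the unit cell. That is why entries such as
# (1/2, 1/2, 1) appear in 'I b a 2' above, and why negative translations like
# (0, -1/2, -1/2) occur in inversion-related operations: each is equivalent to
# its mod-1 image. A hedged sketch of normalising a translation back into
# [0, 1), assuming `N` is numpy; `_reduce_translation` is our name, not part
# of the original module.
def _reduce_translation(trans_num, trans_den):
    # reduce the rational translation into the half-open interval [0, 1)
    t = N.asarray(trans_num, float) / trans_den
    return t - N.floor(t)
# -----------------------------------------------------------------------------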
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
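# --- annotation (not part of the generated tables) ---------------------------
# The ':2' suffix on symbols such as 'P n n n :2' denotes origin choice 2 of
# the International Tables (origin placed at a centre of inversion); the same
# convention applies to every two-origin group registered in this module,
# e.g. 'P b a n :2' and 'P m m n :2' below.
# -----------------------------------------------------------------------------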
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
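# --- annotation (not part of the generated tables) ---------------------------
# The `space_groups` dictionary is keyed both by International Tables number
# and by Hermann-Mauguin symbol, so either lookup returns the same object.
# A usage sketch (comments only, to avoid executing anything mid-module):
#   sg = space_groups[69]
#   sg = space_groups['F m m m']
#   # both lookups alias one SpaceGroup instance:
#   assert space_groups[69] is space_groups['F m m m']
# -----------------------------------------------------------------------------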
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
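# End of the orthorhombic I- and F-centred groups; the tetragonal groups
# (75-142) begin here. Note the flattening convention used throughout: the
# nine entries of each rot are row-major, so N.array([0,-1,0,1,0,0,0,0,1])
# reshaped to (3, 3) has rows (0,-1,0), (1,0,0), (0,0,1), i.e. the 4+
# rotation about c mapping (x, y, z) -> (-y, x, z).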
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
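# Hedged sanity sketch (not part of the generated data; assumes N is numpy):
# the 4_1 generator of P 41 pairs the 4+ rotation with a c/4 translation, so
# composing it with itself four times must give the identity rotation plus a
# whole lattice translation (0, 0, 1) along c.
def _check_p41_screw():
    r = N.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
    t = N.array([0.0, 0.0, 0.25])
    rot_acc, t_acc = N.identity(3), N.zeros(3)
    for _ in range(4):
        # applying (r, t) after (rot_acc, t_acc): x -> r.(rot_acc.x + t_acc) + t
        rot_acc, t_acc = N.dot(r, rot_acc), N.dot(r, t_acc) + t
    return N.allclose(rot_acc, N.identity(3)) and N.allclose(t_acc, [0, 0, 1])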
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
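# P 41 (76) and P 43 (78) are an enantiomorphic pair: their entries differ
# only in the sense of the screw, i.e. the z-translations 1/4 and 3/4 are
# swapped on the two 4-fold operations. The same pairing recurs below for
# P 41 2 2 / P 43 2 2 (91/95) and P 41 21 2 / P 43 21 2 (92/96).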
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
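# Note that the centred copies are stored unreduced: the generator simply
# adds the I-centring vector (1/2, 1/2, 1/2) to each translation, so values
# such as (1, 1/2, 5/4) above are intentional and equal their mod-1
# reductions only up to a lattice translation.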
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
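# Every group is registered under both its International Tables number and
# its Hermann-Mauguin symbol, so the two lookups alias the same object,
# e.g. space_groups[82] is space_groups['I -4'].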
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
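# The ':2' suffix marks origin choice 2 of the International Tables (origin
# placed at a centre of inversion); that choice is why the pure inversion
# above carries no translation while the 4-fold operations pick up the
# (1/2, 0, 0)-type shifts.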
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
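# As with the other I-lattice groups, the second half of the list is an
# exact copy of the first half with the body-centring translation
# (1/2, 1/2, 1/2) added to every operation.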
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
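# The diagonal operations characteristic of the 422 groups appear here as
# well: rows (0,1,0), (1,0,0), (0,0,-1) map (x, y, z) to (y, x, -z), the
# two-fold rotation about the [110] diagonal.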
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
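# In P 4 21 2 the (1/2, 1/2, 0) translations on operations 2-5 turn the
# in-plane two-folds of 422 into 2_1 screws and shift the rotation axes off
# the origin, per the standard setting.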
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
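# Hedged sketch (assumes N is numpy; the helper name is ours): in the 4mm
# groups the point-group parts split into proper rotations (determinant +1)
# and mirrors (determinant -1), which a quick count makes visible.
def _count_proper_improper(ops):
    # `ops` is a list of (rot, trans_num, trans_den) triples as stored above.
    dets = [int(round(N.linalg.det(r))) for (r, tn, td) in ops]
    return dets.count(1), dets.count(-1)
# e.g. _count_proper_improper on the eight P 4 m m triples above would be
# expected to give (4, 4); whether SpaceGroup exposes the list on an
# attribute is an assumption about the class, not confirmed here.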
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
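# Body-centred (I) groups such as I 4 m m list each primitive operation twice:
# once as-is and once with the (1/2, 1/2, 1/2) centring vector added.  The
# generated sums are not reduced modulo the lattice, so translations such as
# (1/2, 1/2, 1) are equivalent to (1/2, 1/2, 0).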
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
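
# Illustration only (not part of the generated data): each triple acts on
# fractional coordinates as x -> rot.x + trans_num/trans_den.  A minimal
# sketch, assuming NumPy is available; _apply_symop is a hypothetical helper,
# not part of the SpaceGroup API.
import numpy as _np

def _apply_symop(rot, trans_num, trans_den, frac_coords):
    """Apply one symmetry operation and wrap the result into the unit cell."""
    rot = _np.asarray(rot, dtype=float).reshape(3, 3)
    shift = _np.asarray(trans_num, dtype=float) / _np.asarray(trans_den, dtype=float)
    return (rot.dot(_np.asarray(frac_coords, dtype=float)) + shift) % 1.0

# Example: the 4_1 screw component of 'I 41 m d' above -- rot (-y, x, z) with
# translation (1/2, 0, 3/4):
#   _apply_symop([0,-1,0, 1,0,0, 0,0,1], [1,0,3], [2,1,4], [0.1, 0.2, 0.3])
#   -> approximately [0.3, 0.1, 0.05]
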
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
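# Space groups 111-122 (the -4 2 m family): the fourfold axis occurs only as
# the rotoinversion -4, i.e. rotation parts (y, -x, -z) and (-y, x, -z),
# never as a proper fourfold rotation.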
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
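
# Consistency sketch (illustrative only): a space group's operation list
# should be closed under composition modulo lattice translations.
# _group_is_closed is a hypothetical check, assuming NumPy; nothing in this
# module calls it.
import numpy as _np

def _group_is_closed(transformations):
    """True if composing any two listed operations yields a listed one."""
    ops = [(_np.asarray(r, dtype=float),
            _np.asarray(n, dtype=float) / _np.asarray(d, dtype=float))
           for r, n, d in transformations]
    def _key(rot, shift):
        # reduce the translation mod 1 and round away floating-point noise
        return (tuple(rot.ravel().astype(int)),
                tuple(_np.round(shift % 1.0, 6) % 1.0))
    table = set(_key(r, s) for r, s in ops)
    for r1, s1 in ops:
        for r2, s2 in ops:
            # composing x -> r2.x + s2 then x -> r1.x + s1 gives
            # x -> (r1.r2).x + (r1.s2 + s1)
            if _key(r1.dot(r2), r1.dot(s2) + s1) not in table:
                return False
    return True

# e.g. the 16 operations of 'P 4/m m m' above should pass this check.
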
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
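# Symbols carrying a ':2' suffix (e.g. 'P 4/n b m :2') denote the origin
# choice 2 setting from the International Tables; each such group is
# registered under the suffixed symbol.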
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = | N.array([1,1,2]) | numpy.array |
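# --- Illustrative aside (not part of the dataset rows above/below) ---
# A minimal sketch of how one (rot, trans_num, trans_den) triple from the
# space-group tables above can be applied to a fractional coordinate,
# assuming N is numpy as in those tables; wrapping into [0, 1) is an
# assumption about the intended unit-cell convention.
import numpy as np

def apply_symmetry_op(rot, trans_num, trans_den, frac_xyz):
    # x' = R @ x + t, where the translation t is stored as the rational
    # vector trans_num / trans_den; wrap the result back into the cell.
    t = trans_num / trans_den
    return (rot.dot(np.asarray(frac_xyz, dtype=float)) + t) % 1.0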
import numpy as np
import numpy.typing as npt
AR_b: npt.NDArray[np.bool_]
AR_i8: npt.NDArray[np.int64]
AR_f8: npt.NDArray[np.float64]
AR_M: npt.NDArray[np.datetime64]
AR_O: npt.NDArray[np.object_]
AR_LIKE_f8: list[float]
reveal_type(np.ediff1d(AR_b)) # E: numpy.ndarray[Any, numpy.dtype[{int8}]]
reveal_type(np.ediff1d(AR_i8, to_end=[1, 2, 3])) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(np.ediff1d(AR_M)) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(np.ediff1d(AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
reveal_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.intersect1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(np.intersect1d(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(np.intersect1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.intersect1d(AR_f8, AR_f8, return_indices=True)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[{float64}]], numpy.ndarray[Any, numpy.dtype[{intp}]], numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(np.setxor1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(np.setxor1d(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(np.setxor1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.in1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.in1d(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.in1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.in1d(AR_f8, AR_LIKE_f8, invert=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.isin(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.isin(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.isin(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.isin(AR_f8, AR_LIKE_f8, invert=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]]
reveal_type(np.union1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(np.union1d(AR_M, AR_M)) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(np.union1d(AR_f8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.setdiff1d(AR_i8, AR_i8)) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(np.setdiff1d(AR_M, AR_M, assume_unique=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type( | np.setdiff1d(AR_f8, AR_i8) | numpy.setdiff1d |
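# --- Illustrative aside (separate from the static typing test above) ---
# A hedged runtime check of the ediff1d semantics the reveal_type lines
# assert: consecutive differences, optionally padded at both ends.
import numpy as np
print(np.ediff1d([1, 4, 9], to_begin=[0], to_end=[99]))  # -> [ 0  3  5 99]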
import numpy as np
a = | np.load("/home/xxx/AAAI/My-ZSSBIR/sketchy_acc_im_em.npy") | numpy.load |
import cv2
import numpy as np
from moviepy.editor import VideoFileClip
def print_section_header(title, len_banner=35):
"""
Helper function to print section header with given title
:param title:
:return:
"""
print()
print('#' * len_banner)
print('#', title)
print('#' * len_banner)
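# Illustrative usage sketch (hypothetical title string):
# print_section_header('Camera Calibration')
# prints a blank line, a '#' banner line, '# Camera Calibration', and a
# closing '#' banner line.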
# Analyze image details
def analyze_test_image(img_path):
img = cv2.imread(img_path)
img_y_size, img_x_size = img.shape[0:2]
print("Image File: {}".format(img_path))
print("Image Size: {}x{}".format(img_x_size, img_y_size))
print("Image Min/Max Values: ({}, {})".format(img.min(), img.max()))
def compute_curvature_poly2(A, B, y_eval):
return ((1 + (2 * A * y_eval + B)**2)**1.5) / | np.abs(2*A) | numpy.abs |
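# --- Illustrative aside ---
# Sanity check for compute_curvature_poly2, assuming it implements the
# radius of curvature of x = A*y**2 + B*y + C; with B = 0 at y = 0 the
# radius reduces to 1/(2*A):
print(compute_curvature_poly2(0.5, 0.0, 0.0))  # expected: 1.0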
import numpy as np
import random
# from imblearn.over_sampling import SMOTE
# following libraries for classification test
# import test_nn
import math
class KNNOR:
# def knnor_over_sample(X,y,n_to_sample,num_neighbors,proportion,max_dist_point,intra=True):
def fit_resample(self,X,y,**params):
threshold_cannot_use=10
# check for number of neighbors
if 'num_neighbors' in params.keys():
num_neighbors=params['num_neighbors']
else:
good_neighbor_count=self.good_count_neighbors(X,y)
if good_neighbor_count<=1:
print("Too few neighbors")
return X,y
num_neighbors=random.randrange(1,good_neighbor_count)
if 'max_dist_point' in params.keys():
max_dist_point=params['max_dist_point']
else:
max_dist_point=self.max_threshold_dist(X,y,num_neighbors)
if 'proportion_minority' in params.keys():
'''
            fraction of the minority population used to set the
            k-th neighbour distance threshold
'''
proportion_minority=params['proportion_minority']
inter=False
else:
proportion_intra=self.calculate_distance_threshold(X,y,num_neighbors,intra=False)
proportion_minority=proportion_intra
inter=True
if not self.check_enough_minorities(X,y,num_neighbors):
print("Too few minorities")
return X,y
if 'final_proportion' in params.keys():
'''
            target size of the minority population as a fraction of the
            majority population
'''
final_proportion=params['final_proportion']
else:
final_proportion=1
n_to_sample=self.calculate_count_to_add(X,y,final_proportion)
original_n_neighbors=num_neighbors
original_max_dist_point=max_dist_point
original_proportion=proportion_minority
minority_label,minority_indices=self.get_minority_label_index(X,y)
X_minority=X[minority_indices]
y_minority=y[minority_indices]
majority_indices=[]
for i in range(0,y.shape[0]):
if i not in minority_indices:
majority_indices.append(i)
print(len(majority_indices),len(minority_indices),y.shape)
X_majority=X[majority_indices]
y_majority=y[majority_indices]
if not inter:
internal_distance = np.linalg.norm(X_minority - X_minority[:,None], axis = -1)
internal_distance = np.sort(internal_distance)
knd=internal_distance[:,num_neighbors]
knd_sorted = np.sort(knd)
else:
external_distance=np.linalg.norm(X_majority - X_minority[:,None], axis = -1)
external_distance = np.sort(external_distance)
knd=external_distance[:,num_neighbors]
knd_sorted=-np.sort(-knd)
threshold_dist = knd_sorted[math.floor(proportion_minority*len(knd_sorted))]
X_new_minority=[]
N = n_to_sample
consecutive_cannot_use=0
while N>0:
for i in range(X_minority.shape[0]):
if inter:
if knd[i]>threshold_dist:
continue
else:
if knd[i]<threshold_dist:
continue
if N==0:
break
v = X_minority[i,:]
val=np.sort( abs((X_minority-v)*(X_minority-v)).sum(axis=1) )
                # Sort neighbours by distance; the first entry must be
                # skipped, since it is the point's distance to itself (0).
posit=np.argsort(abs((X_minority-v)*(X_minority-v)).sum(axis=1))
kv = X_minority[posit[1:num_neighbors+1],:]
alphak = random.uniform(0,max_dist_point)
m0 = v
for j in range(num_neighbors):
m1 = m0 + alphak * (kv[j,:] - m0)
m0 = m1
num_neighbors_to_test=math.floor(math.sqrt(num_neighbors))
can_use=self.predict_classification(X,y,m0, num_neighbors_to_test,minority_label)
can_use=can_use and not(self.check_duplicates(m0,X_minority))
can_use=can_use and not(self.check_duplicates(m0,X_new_minority))
if can_use:
consecutive_cannot_use=0
num_neighbors=min(num_neighbors+1,original_n_neighbors)
max_dist_point=min(max_dist_point+0.01,original_max_dist_point)
proportion_minority=max(proportion_minority-0.01,original_proportion)
threshold_dist = knd_sorted[math.floor(proportion_minority*len(knd_sorted))]
X_new_minority.append(m0)
N-=1
else:
consecutive_cannot_use+=1
if consecutive_cannot_use>=threshold_cannot_use:
num_neighbors=max(num_neighbors-1,2)
max_dist_point=max(max_dist_point-0.01,0.01)
proportion_minority=min(proportion_minority+0.01,0.9)
threshold_dist = knd_sorted[math.floor(proportion_minority*len(knd_sorted))]
consecutive_cannot_use=0
y_new_minority=[minority_label for i in range(len(X_new_minority))]
X_new_minority=np.array(X_new_minority)
X_new_all=np.concatenate((X, X_new_minority), axis=0)
y_new_all= | np.concatenate((y, y_new_minority), axis=0) | numpy.concatenate |
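# --- Illustrative aside ---
# Hypothetical usage sketch; the helper methods KNNOR calls (e.g.
# good_count_neighbors, predict_classification, check_duplicates) are
# defined outside this excerpt, so this is not runnable as shown:
# knnor = KNNOR()
# X_bal, y_bal = knnor.fit_resample(X, y, num_neighbors=5, final_proportion=1.0)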
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
    # Selection options for occlusion colour, segmentation granularity
    # and explained class
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
    # Explained instances (one per class)
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
           'name': 'Labrador retriever'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
           'name': 'tennis ball'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
           'name': 'golden retriever'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
           'name': 'Labrador retriever'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
           'name': 'tennis ball'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
           'name': 'golden retriever'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
           'name': 'Labrador retriever'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
           'name': 'tennis ball'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
           'name': 'golden retriever'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
           'name': 'Labrador retriever'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
           'name': 'tennis ball'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
           'name': 'golden retriever'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
           'name': 'Labrador retriever'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
           'name': 'tennis ball'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
           'name': 'golden retriever'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
           'name': 'Labrador retriever'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
           'name': 'tennis ball'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
           'name': 'golden retriever'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
           'name': 'Labrador retriever'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
           'name': 'tennis ball'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
           'name': 'golden retriever'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
           'name': 'Labrador retriever'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
           'name': 'tennis ball'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
           'name': 'golden retriever'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
           'name': 'Labrador retriever'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
           'name': 'tennis ball'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
           'name': 'golden retriever'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
           'name': 'Labrador retriever'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
           'name': 'tennis ball'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
           'name': 'golden retriever'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
           'name': 'Labrador retriever'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
           'name': 'tennis ball'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
           'name': 'golden retriever'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
           'name': 'Labrador retriever'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
           'name': 'tennis ball'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
           'name': 'golden retriever'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
           'name': 'Labrador retriever'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
           'name': 'tennis ball'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
           'name': 'golden retriever'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
           'name': 'Labrador retriever'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
           'name': 'tennis ball'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
           'name': 'golden retriever'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
           'name': 'Labrador retriever'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
           'name': 'tennis ball'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
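# --- 'setosa&1&*' entries begin here. Keys follow the pattern
# '<class>&<i>&<j>'; what the indices i and j enumerate is not documented
# in this section, so any reading of their meaning is an assumption.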
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&192': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&294': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&295': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&296': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&300': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&301': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&305': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&306': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&307': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&308': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&309': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&310': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&311': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
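# --- 'setosa&2&*' entries begin here (same key pattern, middle index 2).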
'setosa&2&0': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&1': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&2': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&3': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&4': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&5': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&6': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&7': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&8': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&9': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&10': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&11': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&12': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&13': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&14': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&15': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&16': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&17': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&18': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&19': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&20': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&21': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&22': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&23': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&24': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&25': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&26': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&27': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&28': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&29': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&30': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&31': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&32': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&33': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&34': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&35': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&36': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&37': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&38': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&39': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&40': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&41': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&42': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&43': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&44': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&45': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&46': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&50': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&51': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&52': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&53': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&54': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&55': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&56': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&60': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&61': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&65': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&66': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&67': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&68': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&69': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&70': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&71': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&75': np.array([0.0, -0.47562425924289314]),
'setosa&2&76': np.array([0.0, -0.48543689565931186]),
'setosa&2&77': np.array([0.0, -0.7348263896003956]),
'setosa&2&78': np.array([0.0, -0.7920887571493729]),
'setosa&2&79': np.array([0.0, -0.507614207038711]),
'setosa&2&80': np.array([0.0, -0.47562425924289314]),
'setosa&2&81': np.array([0.0, -0.47562425924289314]),
'setosa&2&82': np.array([0.0, -0.47562425924289314]),
'setosa&2&83': np.array([0.0, -0.47562425924289314]),
'setosa&2&84': np.array([0.0, -0.48543689565931186]),
'setosa&2&85': np.array([0.0, -0.48543689565931186]),
'setosa&2&86': np.array([0.0, -0.48543689565931186]),
'setosa&2&87': np.array([0.0, -0.7348263896003956]),
'setosa&2&88': np.array([0.0, -0.7348263896003956]),
'setosa&2&89': np.array([0.0, -0.7920887571493729]),
'setosa&2&90': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&91': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&92': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&93': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&94': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&95': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&96': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&97': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&98': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&99': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&100': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&101': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&102': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&103': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&104': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&105': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&106': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&107': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&108': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&109': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&110': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&111': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&112': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&113': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&114': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&115': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&116': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&117': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&118': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&119': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&120': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&121': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&122': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&123': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&124': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&125': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&126': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&127': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&128': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&129': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&130': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&131': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&132': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&133': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&134': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&136': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&137': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&138': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&139': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&140': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&141': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&142': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&143': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&144': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&145': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&146': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&147': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&148': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&149': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&150': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&151': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&152': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&153': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&154': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&155': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&156': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&157': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&158': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&159': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&160': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&161': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&162': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&163': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&164': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&165': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&166': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&167': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&168': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&169': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&170': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&171': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&172': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&173': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&174': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&175': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&176': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&177': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&178': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&179': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&180': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&181': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&182': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&183': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&184': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&185': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&186': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&187': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&188': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&189': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&190': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&191': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&192': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&193': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&194': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&195': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&196': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&197': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&198': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&199': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&200': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&201': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&202': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&203': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&204': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&205': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&206': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&207': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&208': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&209': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&210': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&211': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&212': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&213': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&214': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&215': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&216': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&217': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&218': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&219': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&220': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&221': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&222': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&223': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&224': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&225': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&226': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&227': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&228': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&229': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&230': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&231': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&232': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&233': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&234': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&235': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&236': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&237': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&238': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&239': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&240': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&241': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&242': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&243': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&244': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&245': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&246': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&247': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&248': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&249': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&250': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&251': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&252': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&253': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&254': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&255': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&256': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&257': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&258': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&259': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&260': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&261': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&262': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&263': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&264': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&265': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&266': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&267': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&268': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&269': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&270': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&271': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&275': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&276': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&277': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&278': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&279': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&280': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&281': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&285': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&286': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&290': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&291': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&292': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&293': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&294': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&295': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&296': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&300': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&301': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&305': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&306': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&307': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&308': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&309': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&310': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&311': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
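# --- 'versicolor&0&*' entries begin here: first block for the 'versicolor'
# class, in the same '<class>&<i>&<j>' key format as the 'setosa' blocks above.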
'versicolor&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&2': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&3': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&6': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&7': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&9': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&10': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&12': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&13': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&14': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&17': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&18': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&21': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&22': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&24': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&25': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&27': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&28': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&29': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&32': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&33': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&36': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&37': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&39': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&40': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&42': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&43': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&44': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&45': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&46': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&50': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&51': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&52': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&53': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&54': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&55': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&56': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&66': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&67': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&69': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&70': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&75': np.array([0.0, -0.95124502153736]),
'versicolor&0&76': np.array([0.0, -0.9708703761803881]),
'versicolor&0&77': np.array([0.0, 0.5659706098422994]),
'versicolor&0&78': np.array([0.0, 0.3962828716108186]),
'versicolor&0&79': np.array([0.0, 0.2538069363248767]),
'versicolor&0&80': np.array([0.0, -0.9708703761803881]),
'versicolor&0&81': np.array([0.0, -0.3631376646911367]),
'versicolor&0&82': np.array([0.0, -0.5804857652839247]),
'versicolor&0&83': np.array([0.0, -0.8943993997517804]),
'versicolor&0&84': np.array([0.0, -0.4231275527222919]),
'versicolor&0&85': np.array([0.0, -0.6164235822373675]),
'versicolor&0&86': np.array([0.0, -0.9166476163222441]),
'versicolor&0&87': np.array([0.0, 0.5659706098422994]),
'versicolor&0&88': np.array([0.0, 0.5659706098422994]),
'versicolor&0&89': np.array([0.0, 0.3962828716108186]),
'versicolor&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&92': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&93': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&96': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&97': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&99': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&100': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&102': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&103': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&104': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&107': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&108': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&111': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&112': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&114': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&115': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&117': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&118': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&119': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&120': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&121': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&122': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&123': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&124': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&125': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&126': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&127': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&128': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&129': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&130': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&131': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&132': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&133': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&134': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&137': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&138': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&141': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&142': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&144': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&145': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&147': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&148': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&149': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&152': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&153': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&156': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&157': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&159': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&160': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&162': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&163': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&164': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&167': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&168': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&171': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&172': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&174': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&175': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&177': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&178': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&179': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&180': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&181': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&182': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&183': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&184': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&185': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&186': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&187': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&188': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&189': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&190': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&191': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&192': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&193': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&194': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&197': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&198': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&201': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&202': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&204': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&205': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&207': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&208': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&209': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&212': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&213': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&216': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&217': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&219': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&220': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&222': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&223': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&224': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&225': np.array([-0.04777085826693217, -0.931704979630315]),
'versicolor&0&226': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&227': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&228': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&229': np.array([-0.46216647196120714, 0.35468591243823655]),
'versicolor&0&230': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&231': np.array([-0.3707180757031537, -0.1977196581472426]),
'versicolor&0&232': np.array([-0.1043459833293615, -0.5233314327065356]),
'versicolor&0&233': np.array([-0.049289647556763364, -0.8736084405111605]),
'versicolor&0&234': np.array([-0.34078174031874375, -0.25874482325965437]),
'versicolor&0&235': np.array([-0.050841051273783675, -0.5877587283589205]),
'versicolor&0&236': np.array([-0.0161720977425142, -0.9096817855236822]),
'versicolor&0&237': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&238': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&239': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'versicolor&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&242': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&243': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'versicolor&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&246': np.array([-0.6425009695928476, -0.24851992476830956]),
'versicolor&0&247': np.array([-0.5151243662384031, -0.3255567772442641]),
'versicolor&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'versicolor&0&249': np.array([-0.6300442788906601, -0.28361140069713875]),
'versicolor&0&250': np.array([-0.4875864856121089, -0.3614122096616301]),
'versicolor&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'versicolor&0&252': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&253': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&254': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&257': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&258': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&261': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&262': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&264': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&265': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&267': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&268': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&269': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&270': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&271': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&275': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&276': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&277': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&278': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&279': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&280': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&281': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&285': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&286': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&290': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&291': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&292': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&293': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&294': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&295': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&296': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&300': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&301': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&305': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&306': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&307': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&308': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&309': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&310': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&311': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
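    # --- The middle field of the key switches from 0 to 1 below. The key pattern
    # 'versicolor&<i>&<j>' is assumed (not documented here) to encode
    # (label, output/class index, case index); only the numeric switch is certain. ---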
'versicolor&1&0': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&1': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&2': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&3': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&4': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&5': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&6': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&7': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&8': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&9': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&10': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&11': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&12': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&13': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&14': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&15': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&16': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&17': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&18': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&19': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&20': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&21': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&22': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&23': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&24': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&25': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&26': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&27': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&28': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&29': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&30': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&31': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&32': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&33': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&34': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&35': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&36': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&37': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&38': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&39': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&40': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&41': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&42': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&43': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&44': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&45': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&46': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&50': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&51': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&52': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&53': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&54': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&55': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&56': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&60': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&61': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&65': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&66': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&67': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&68': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&69': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&70': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&71': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&75': np.array([0.0, 0.4756207622944677]),
'versicolor&1&76': np.array([0.0, 0.4854334805210761]),
'versicolor&1&77': np.array([0.0, 0.16885577975809635]),
'versicolor&1&78': np.array([0.0, 0.395805885538554]),
'versicolor&1&79': np.array([0.0, 0.2538072707138344]),
'versicolor&1&80': np.array([0.0, 0.4854334805210761]),
'versicolor&1&81': np.array([0.0, 0.7613919530844643]),
'versicolor&1&82': np.array([0.0, 0.6668230985485095]),
'versicolor&1&83': np.array([0.0, 0.4904755652105692]),
'versicolor&1&84': np.array([0.0, 0.8121046082359693]),
'versicolor&1&85': np.array([0.0, 0.6855766903749089]),
'versicolor&1&86': np.array([0.0, 0.5008471974438506]),
'versicolor&1&87': np.array([0.0, 0.16885577975809635]),
'versicolor&1&88': np.array([0.0, 0.16885577975809635]),
'versicolor&1&89': np.array([0.0, 0.395805885538554]),
'versicolor&1&90': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&91': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&92': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&93': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&94': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&95': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&96': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&97': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&98': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&99': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&100': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&101': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&102': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&103': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&104': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&105': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&106': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&107': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&108': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&109': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&110': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&111': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&112': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&113': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&114': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&115': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&116': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&117': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&118': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&119': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&120': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&121': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&122': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&123': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&124': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&125': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&126': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&127': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&128': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&129': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&130': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&131': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&132': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&133': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&134': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&135': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&136': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&137': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&138': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&139': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&140': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&141': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&142': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&143': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&144': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&145': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&146': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&147': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&148': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&149': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&150': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&151': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&152': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&153': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&154': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&155': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&156': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&157': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&158': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&159': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&160': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&161': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&162': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&163': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&164': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&165': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&166': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&167': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&168': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&169': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&170': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&171': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&172': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&173': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&174': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&175': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&176': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&177': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&178': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&179': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&180': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&181': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&182': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&183': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&184': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&185': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&186': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&187': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&188': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&189': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&190': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&191': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&192': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&193': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&194': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&195': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&196': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&197': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&198': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&199': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&200': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&201': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&202': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&203': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&204': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&205': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&206': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&207': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&208': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&209': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&210': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&211': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&212': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&213': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&214': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&215': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&216': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&217': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&218': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&219': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&220': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&221': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&222': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&223': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&224': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&225': np.array([0.6253337666017573, 0.21983620140147825]),
'versicolor&1&226': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&227': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&228': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&229': np.array([0.7182033715159247, 0.0970420677941148]),
'versicolor&1&230': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&231': np.array([0.4976586558055923, 0.5393318265947251]),
'versicolor&1&232': np.array([0.4361093214026388, 0.4279491486345008]),
'versicolor&1&233': np.array([0.613985959011319, 0.23148898930908424]),
'versicolor&1&234': np.array([0.46747697713468217, 0.586607956360002]),
'versicolor&1&235': np.array([0.41044950174869577, 0.45415985894965977]),
'versicolor&1&236': np.array([0.6057447478066579, 0.23993389556303918]),
'versicolor&1&237': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&238': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&239': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&240': np.array([0.056623968925773045, 0.43360725859686644]),
'versicolor&1&241': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&242': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&243': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&244': np.array([0.3351719071445682, 0.20616862401308342]),
'versicolor&1&245': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&246': np.array([0.24022705822940116, 0.7185371033867092]),
'versicolor&1&247': np.array([0.010447231513465048, 0.6616528865917504]),
'versicolor&1&248': np.array([0.024556360933646205, 0.4723948285969902]),
'versicolor&1&249': np.array([0.21321406009810842, 0.7648907754638917]),
'versicolor&1&250': np.array([-0.027450681014480036, 0.6999336015080245]),
'versicolor&1&251': np.array([-0.0164329511444131, 0.5132208276383963]),
'versicolor&1&252': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&253': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&254': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&255': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&256': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&257': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&258': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&259': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&260': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&261': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&262': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&263': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&264': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&265': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&266': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&267': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&268': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&269': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&270': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&271': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&275': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&276': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&277': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&278': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&279': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&280': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&281': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&285': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&286': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&290': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&291': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&292': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&293': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&294': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&295': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&296': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&300': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&301': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&305': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&306': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&307': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&308': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&309': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&310': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&311': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
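    # --- Middle field of the key switches from 1 to 2 below (keys 'versicolor&2&<j>'). ---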
'versicolor&2&0': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&1': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&2': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&3': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&4': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&5': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&6': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&7': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&8': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&9': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&10': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&11': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&12': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&13': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&14': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&15': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&16': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&17': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&18': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&19': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&20': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&21': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&22': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&23': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&24': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&25': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&26': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&27': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&28': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&29': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&30': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&31': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&32': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&33': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&34': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&35': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&36': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&37': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&38': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&39': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&40': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&41': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&42': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&43': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&44': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&45': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&46': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&50': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&51': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&52': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&53': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&54': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&55': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&56': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&60': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&61': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&65': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&66': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&67': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&68': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&69': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&70': np.array([-0.6488228567611906, -0.03186184826812757]),
'versicolor&2&71': np.array([-0.5903420131350324, 0.384224764046184]),
'versicolor&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&75': np.array([0.0, 0.47562425924289314]),
'versicolor&2&76': np.array([0.0, 0.4854368956593117]),
'versicolor&2&77': np.array([0.0, -0.7348263896003956]),
'versicolor&2&78': np.array([0.0, -0.7920887571493729]),
'versicolor&2&79': np.array([0.0, -0.507614207038711]),
'versicolor&2&80': np.array([0.0, 0.4854368956593117]),
'versicolor&2&81': np.array([0.0, -0.3982542883933272]),
'versicolor&2&82': np.array([0.0, -0.08633733326458487]),
'versicolor&2&83': np.array([0.0, 0.4039238345412103]),
'versicolor&2&84': np.array([0.0, -0.38897705551367706]),
'versicolor&2&85': np.array([0.0, -0.06915310813754129]),
'versicolor&2&86': np.array([0.0, 0.41580041887839214]),
'versicolor&2&87': np.array([0.0, -0.7348263896003956]),
'versicolor&2&88': np.array([0.0, -0.7348263896003956]),
'versicolor&2&89': np.array([0.0, -0.7920887571493729]),
'versicolor&2&90': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&91': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&92': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&93': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&94': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&95': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&96': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&97': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&98': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&99': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&100': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&101': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&102': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&103': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&104': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&105': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&106': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&107': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&108': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&109': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&110': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&111': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&112': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&113': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&114': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&115': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&116': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&117': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&118': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&119': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&120': np.array([-0.7638917827493686, 0.868015757634957]),
'versicolor&2&121': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&122': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&123': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&124': np.array([-0.32941440381886555, -0.4173178729969913]),
'versicolor&2&125': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&126': np.array([-0.18291442454393395, -0.2654898014002494]),
'versicolor&2&127': np.array([-0.5797728557269727, 0.3163189837954924]),
'versicolor&2&128': np.array([-0.7579323596667402, 0.8054136823046655]),
'versicolor&2&129': np.array([-0.1948624323669993, -0.23753953755286383]),
'versicolor&2&130': np.array([-0.6437698977881832, 0.3909540110317858]),
'versicolor&2&131': np.array([-0.7963046521980063, 0.846536369471985]),
'versicolor&2&132': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&133': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&134': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&135': np.array([-0.3219660907491514, 0.7482043503408669]),
'versicolor&2&136': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&137': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&138': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&139': np.array([0.2626914501948546, -0.5596191134224637]),
'versicolor&2&140': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&141': np.array([0.4734444929420575, -0.6150974537943872]),
'versicolor&2&142': np.array([0.5369392542176313, -0.430867927332838]),
'versicolor&2&143': np.array([-0.19892251970509112, 0.5718543863753405]),
'versicolor&2&144': np.array([0.5071047612208237, -0.6507546896558788]),
'versicolor&2&145': np.array([0.5629877361048359, -0.4485515113017818]),
'versicolor&2&146': np.array([-0.3047657227470458, 0.6788631774846587]),
'versicolor&2&147': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&148': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&149': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&150': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&151': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&152': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&153': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&154': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&155': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&156': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&157': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&158': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&159': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&160': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&161': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&162': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&163': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&164': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&165': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&166': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&167': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&168': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&169': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&170': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&171': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&172': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&173': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&174': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&175': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&176': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&177': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&178': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&179': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&180': np.array([-0.7638917827493686, 0.868015757634957]),
'versicolor&2&181': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&182': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&183': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&184': np.array([-0.32941440381886555, -0.4173178729969913]),
'versicolor&2&185': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&186': np.array([-0.18291442454393395, -0.2654898014002494]),
'versicolor&2&187': np.array([-0.5797728557269727, 0.3163189837954924]),
'versicolor&2&188': np.array([-0.7579323596667402, 0.8054136823046655]),
'versicolor&2&189': np.array([-0.1948624323669993, -0.23753953755286383]),
'versicolor&2&190': np.array([-0.6437698977881832, 0.3909540110317858]),
'versicolor&2&191': np.array([-0.7963046521980063, 0.846536369471985]),
'versicolor&2&192': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&193': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&194': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&195': np.array([-0.3219660907491514, 0.7482043503408669]),
'versicolor&2&196': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&197': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&198': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&199': np.array([0.2626914501948546, -0.5596191134224637]),
'versicolor&2&200': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&201': np.array([0.4734444929420575, -0.6150974537943872]),
'versicolor&2&202': np.array([0.5369392542176313, -0.430867927332838]),
'versicolor&2&203': np.array([-0.19892251970509112, 0.5718543863753405]),
'versicolor&2&204': np.array([0.5071047612208237, -0.6507546896558788]),
'versicolor&2&205': np.array([0.5629877361048359, -0.4485515113017818]),
'versicolor&2&206': np.array([-0.3047657227470458, 0.6788631774846587]),
'versicolor&2&207': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&208': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&209': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&210': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&211': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&212': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&213': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&214': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&215': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&216': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&217': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&218': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&219': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&220': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&221': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&222': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&223': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&224': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&225': np.array([-0.5775629083348267, 0.7118687782288384]),
'versicolor&2&226': np.array([-0.6016445709024666, 0.7366089009875252]),
'versicolor&2&227': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&228': np.array([-0.0917622729715107, -0.7645776302158537]),
'versicolor&2&229': np.array([-0.25603689955471853, -0.451727980232351]),
'versicolor&2&230': np.array([-0.6016445709024666, 0.7366089009875252]),
'versicolor&2&231': np.array([-0.1269405801024398, -0.34161216844748166]),
'versicolor&2&232': np.array([-0.33176333807327857, 0.09538228407203546]),
'versicolor&2&233': np.array([-0.564696311454556, 0.6421194512020755]),
'versicolor&2&234': np.array([-0.12669523681593967, -0.32786313310034665]),
'versicolor&2&235': np.array([-0.35960845047491363, 0.1335988694092619]),
'versicolor&2&236': np.array([-0.589572650064144, 0.6697478899606418]),
'versicolor&2&237': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&238': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&239': np.array([-0.0917622729715107, -0.7645776302158537]),
'versicolor&2&240': np.array([0.05667262840030629, 0.4335746514880877]),
'versicolor&2&241': np.array([0.0202211257171063, 0.470123810164804]),
'versicolor&2&242': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&243': np.array([0.22461127196921116, -0.7375780139111495]),
'versicolor&2&244': np.array([0.3463149754241171, -0.5568366400939154]),
'versicolor&2&245': np.array([0.0202211257171063, 0.470123810164804]),
'versicolor&2&246': np.array([0.4022739113634462, -0.4700171786183992]),
'versicolor&2&247': np.array([0.5046771347249378, -0.33609610934748635]),
'versicolor&2&248': np.array([0.1370187510624256, 0.30303755274337163]),
'versicolor&2&249': np.array([0.41683021879255133, -0.4812793747667524]),
'versicolor&2&250': np.array([0.5150371666265885, -0.33852139184639396]),
'versicolor&2&251': np.array([0.10611499646955676, 0.33589829339460586]),
'versicolor&2&252': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&253': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&254': np.array([0.22461127196921116, -0.7375780139111495]),
'versicolor&2&255': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&256': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&257': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&258': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&259': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&260': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&261': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&262': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&263': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&264': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&265': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&266': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&267': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&268': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&269': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&270': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&271': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&275': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&276': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&277': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&278': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&279': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&280': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&281': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&285': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&286': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&290': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&291': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&292': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&293': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&294': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&295': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&296': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&300': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&301': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&305': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&306': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&307': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&308': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&309': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&310': np.array([-0.6488228567611906, -0.03186184826812757]),
'versicolor&2&311': np.array([-0.5903420131350324, 0.384224764046184]),
'versicolor&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
'virginica&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&2': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&3': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&6': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&7': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&9': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&10': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&12': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&13': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&14': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&17': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&18': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&21': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&22': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&24': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&25': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&27': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&28': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&29': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&32': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&33': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&36': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&37': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&39': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&40': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&42': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&43': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&44': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&45': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&46': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&47': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&48': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&49': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&50': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&51': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&52': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&53': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&54': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&55': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&56': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&57': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&58': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&59': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'virginica&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&62': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&63': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'virginica&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&66': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&67': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'virginica&0&69': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&70': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'virginica&0&72': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&73': np.array([0.11200181312407695, -0.5330612470996793]),
'virginica&0&74': np.array([0.19998284600732558, -0.3489062419702088]),
'virginica&0&75': np.array([0.0, -0.95124502153736]),
'virginica&0&76': np.array([0.0, -0.9708703761803881]),
'virginica&0&77': np.array([0.0, -0.5659706098422994]),
'virginica&0&78': np.array([0.0, -0.3962828716108186]),
'virginica&0&79': np.array([0.0, 0.2538069363248767]),
'virginica&0&80': np.array([0.0, -0.9708703761803881]),
'virginica&0&81': np.array([0.0, -0.5659706098422994]),
'virginica&0&82': np.array([0.0, -0.3962828716108186]),
'virginica&0&83': np.array([0.0, -0.8943993997517804]),
'virginica&0&84': np.array([0.0, -0.5659706098422994]),
'virginica&0&85': np.array([0.0, -0.3962828716108186]),
'virginica&0&86': np.array([0.0, -0.9166476163222441]),
'virginica&0&87': np.array([0.0, -0.3962828716108186]),
'virginica&0&88': np.array([0.0, -0.5466925844560601]),
'virginica&0&89': np.array([0.0, -0.38529908946531777]),
'virginica&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&92': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&93': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&96': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&97': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&99': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&100': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&102': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&103': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&104': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&107': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&108': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&111': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&112': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&114': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&115': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&117': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&118': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&119': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&120': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&121': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&122': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&123': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&124': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&125': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&126': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&127': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&128': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&129': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&130': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&131': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&132': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&133': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&134': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'virginica&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&137': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&138': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'virginica&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&141': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&142': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'virginica&0&144': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&145': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'virginica&0&147': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&148': np.array([-0.6898990333725056, -0.2534947697713122]),
'virginica&0&149': np.array([-0.769491694075929, -0.22884642137519118]),
'virginica&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&152': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&153': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&156': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&157': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&159': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&160': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&162': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&163': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&164': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&167': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&168': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&171': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&172': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&174': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&175': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&177': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&178': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&179': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&180': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&181': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&182': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&183': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&184': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&185': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&186': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&187': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&188': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&189': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&190': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&191': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&192': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&193': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&194': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'virginica&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&197': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&198': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'virginica&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&201': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&202': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'virginica&0&204': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&205': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'virginica&0&207': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&208': np.array([-0.6898990333725056, -0.2534947697713122]),
'virginica&0&209': np.array([-0.769491694075929, -0.22884642137519118]),
'virginica&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&212': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&213': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&216': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&217': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&219': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&220': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&222': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&223': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&224': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&225': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&226': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&227': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&228': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&229': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&230': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&231': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&232': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&233': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&234': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&235': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&236': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&237': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&238': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&239': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'virginica&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'virginica&0&242': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&243': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'virginica&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'virginica&0&246': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&247': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'virginica&0&249': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&250': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'virginica&0&252': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&253': np.array([-0.544626974647221, -0.24972982107967573]),
'virginica&0&254': np.array([-0.6426355680762406, -0.20016519137103667]),
'virginica&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&257': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&258': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&261': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&262': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&264': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&265': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&267': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&268': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&269': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&270': np.array([-0.04201361383207032, -0.9372571358382161]),
'virginica&0&271': np.array([-0.014237661899709955, -0.9660323357290304]),
'virginica&0&272': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&273': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&274': np.array([-0.4167677904879879, 0.22207334821665425]),
'virginica&0&275': np.array([-0.014237661899709955, -0.9660323357290304]),
'virginica&0&276': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&277': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&278': np.array([-0.07857689135903215, -0.8696882596532965]),
'virginica&0&279': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&280': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&281': np.array([-0.05160969201296555, -0.9000166344885441]),
'virginica&0&282': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&283': np.array([-0.0766197045034485, -0.5080325256323984]),
'virginica&0&284': np.array([-0.32767091750230254, -0.19689316772421933]),
'virginica&0&285': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&286': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&287': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&288': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&289': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&290': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&291': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&292': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&293': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&294': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&295': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&296': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&297': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&298': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&299': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&300': np.array([0.029402442458921384, -0.9481684282717414]),
'virginica&0&301': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&302': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&303': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'virginica&0&305': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&306': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&307': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&308': np.array([0.19002455311770447, -0.8848597943731074]),
'virginica&0&309': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&310': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&311': np.array([0.1746467870122951, -0.9073062742839755]),
'virginica&0&312': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&313': np.array([0.11200181312407695, -0.5330612470996793]),
'virginica&0&314': np.array([0.19998284600732558, -0.3489062419702088]),
'virginica&1&0': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&1': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&2': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&3': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&4': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&5': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&6': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&7': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&8': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&9': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&10': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&11': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&12': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&13': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&14': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&15': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&16': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&17': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&18': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&19': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&20': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&21': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&22': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&23': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&24': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&25': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&26': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&27': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&28': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&29': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&30': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&31': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&32': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&33': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&34': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&35': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&36': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&37': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&38': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&39': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&40': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&41': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&42': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&43': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&44': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&45': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&46': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&47': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&48': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&49': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&50': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&51': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&52': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&53': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&54': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&55': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&56': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&57': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&58': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&59': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&60': np.array([0.4933316375690332, 0.5272416708629276]),
'virginica&1&61': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&62': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&63': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'virginica&1&65': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&66': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&67': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&68': np.array([0.40694846236352233, 0.5109051764198169]),
'virginica&1&69': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&70': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&71': np.array([0.415695226122737, 0.5230815102377903]),
'virginica&1&72': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&73': np.array([0.28313251310829024, -0.10978015869508362]),
'virginica&1&74': np.array([0.20013484983664692, -0.3483612449300506]),
'virginica&1&75': np.array([0.0, 0.4756207622944677]),
'virginica&1&76': np.array([0.0, 0.4854334805210761]),
'virginica&1&77': np.array([0.0, -0.16885577975809632]),
'virginica&1&78': np.array([0.0, -0.39580588553855395]),
'virginica&1&79': np.array([0.0, 0.2538072707138344]),
'virginica&1&80': np.array([0.0, 0.4854334805210761]),
'virginica&1&81': np.array([0.0, -0.16885577975809632]),
'virginica&1&82': np.array([0.0, -0.39580588553855395]),
'virginica&1&83': np.array([0.0, 0.4904755652105692]),
'virginica&1&84': np.array([0.0, -0.16885577975809632]),
'virginica&1&85': np.array([0.0, -0.39580588553855395]),
'virginica&1&86': np.array([0.0, 0.5008471974438506]),
'virginica&1&87': np.array([0.0, -0.39580588553855395]),
'virginica&1&88': np.array([0.0, -0.14423919730424817]),
'virginica&1&89': np.array([0.0, -0.3847817540585927]),
'virginica&1&90': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&91': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&92': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&93': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&94': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&95': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&96': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&97': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&98': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&99': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&100': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&101': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&102': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&103': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&104': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&105': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&106': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&107': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&108': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&109': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&110': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&111': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&112': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&113': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&114': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&115': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&116': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&117': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&118': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&119': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&120': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&121': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&122': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&123': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&124': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&125': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&126': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&127': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&128': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&129': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&130': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&131': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&132': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&133': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&134': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&135': np.array([0.5188109114552927, 0.03638964581864269]),
'virginica&1&136': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&137': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&138': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&139': np.array([0.5436097000280874, 0.1461891067488832]),
'virginica&1&140': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&141': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&142': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&143': np.array([0.4788153032824012, 0.08625929936974323]),
'virginica&1&144': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&145': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&146': np.array([0.46583127837967303, 0.09875847161509169]),
'virginica&1&147': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&148': np.array([0.7419884013108898, -0.4595742931114029]),
'virginica&1&149': np.array([0.6092194175719845, -0.5086479426935605]),
'virginica&1&150': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&151': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&152': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&153': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&154': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&155': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&156': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&157': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&158': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&159': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&160': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&161': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&162': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&163': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&164': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&165': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&166': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&167': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&168': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&169': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&170': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&171': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&172': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&173': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&174': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&175': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&176': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&177': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&178': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&179': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&180': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&181': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&182': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&183': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&184': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&185': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&186': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&187': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&188': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&189': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&190': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&191': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&192': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&193': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&194': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&195': np.array([0.5188109114552927, 0.03638964581864269]),
'virginica&1&196': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&197': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&198': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&199': np.array([0.5436097000280874, 0.1461891067488832]),
'virginica&1&200': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&201': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&202': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&203': np.array([0.4788153032824012, 0.08625929936974323]),
'virginica&1&204': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&205': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&206': np.array([0.46583127837967303, 0.09875847161509169]),
'virginica&1&207': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&208': np.array([0.7419884013108898, -0.4595742931114029]),
'virginica&1&209': np.array([0.6092194175719845, -0.5086479426935605]),
'virginica&1&210': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&211': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&212': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&213': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&214': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&215': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&216': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&217': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&218': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&219': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&220': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&221': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&222': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&223': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&224': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&225': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&226': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&227': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&228': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&229': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&230': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&231': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&232': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&233': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&234': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&235': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&236': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&237': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&238': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&239': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&240': np.array([0.056623968925773045, 0.43360725859686644]),
'virginica&1&241': np.array([0.020169511418752378, 0.47015948158260334]),
'virginica&1&242': np.array([0.5806365328450952, -0.4726270680771261]),
'virginica&1&243': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&244': np.array([0.3351719071445682, 0.20616862401308342]),
'virginica&1&245': np.array([0.020169511418752378, 0.47015948158260334]),
'virginica&1&246': np.array([0.5806365328450952, -0.4726270680771261]),
'virginica&1&247': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&248': np.array([0.024556360933646205, 0.4723948285969902]),
'virginica&1&249': np.array([0.5806365328450952, -0.4726270680771261]),
'virginica&1&250': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&251': np.array([-0.0164329511444131, 0.5132208276383963]),
'virginica&1&252': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&253': np.array([0.581569928198426, -0.46134543884925855]),
'virginica&1&254': np.array([0.42361197252581306, -0.5068181610814407]),
'virginica&1&255': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&256': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&257': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&258': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&259': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&260': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&261': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&262': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&263': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&264': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&265': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&266': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&267': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&268': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&269': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&270': np.array([-0.6288817118959938, 0.6849987400957501]),
'virginica&1&271': np.array([-0.6491819158994796, 0.7060292771859485]),
'virginica&1&272': np.array([-0.36354251586275393, 0.01503732165107865]),
'virginica&1&273': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&274': np.array([-0.3507937472799825, 0.22709708691079003]),
'virginica&1&275': np.array([-0.6491819158994796, 0.7060292771859485]),
'virginica&1&276': np.array([-0.36354251586275393, 0.01503732165107865]),
'virginica&1&277': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&278': np.array([-0.6219129029345898, 0.6860569455333333]),
'virginica&1&279': np.array([-0.36354251586275393, 0.01503732165107865]),
'virginica&1&280': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&281': np.array([-0.6423063482710314, 0.7078274136226649]),
'virginica&1&282': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&283': np.array([-0.38798262782075055, 0.05152547330256509]),
'virginica&1&284': np.array([-0.23804537254556749, -0.24790919248823104]),
'virginica&1&285': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&286': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&287': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&288': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&289': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&290': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&291': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&292': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&293': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&294': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&295': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&296': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&297': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&298': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&299': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&300': np.array([0.4933316375690332, 0.5272416708629276]),
'virginica&1&301': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&302': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&303': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'virginica&1&305': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&306': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&307': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&308': np.array([0.40694846236352233, 0.5109051764198169]),
'virginica&1&309': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&310': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&311': np.array([0.415695226122737, 0.5230815102377903]),
'virginica&1&312': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&313': np.array([0.28313251310829024, -0.10978015869508362]),
'virginica&1&314': np.array([0.20013484983664692, -0.3483612449300506]),
'virginica&2&0': np.array([0.37157691321004915, 0.12216227283618836]),
'virginica&2&1': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&2': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&3': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&4': np.array([0.4741571944522723, -0.3872697414416878]),
'virginica&2&5': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&6': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&7': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&8': np.array([0.6273836195848199, -0.15720981251964872]),
'virginica&2&9': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&10': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&11': np.array([0.6863652799597699, -0.21335694415409426]),
'virginica&2&12': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&13': np.array([0.11274898124253621, 0.6292927079496371]),
'virginica&2&14': np.array([0.32240464148521225, 0.645858545382009]),
}
import h5py
import numpy as np

files = ['train.h5', 'train2.h5', 'train3.h5', 'train4.h5', 'train5.h5', 'train6.h5']

if __name__ == '__main__':
    new_data = h5py.File('new.h5', 'w')
    for i, file in enumerate(files):
        piece = h5py.File(file, 'r')
        if i == 0:
            dat = piece['data'][()]            # dataset[()] reads the full array (the old .value accessor is deprecated)
            label_x4 = piece['label_x4'][()]
        else:
            dat = np.concatenate((dat, piece['data'][()]), axis=0)
            label_x4 = np.concatenate((label_x4, piece['label_x4'][()]), axis=0)
        piece.close()
    # Persist the merged arrays into new.h5.
    new_data.create_dataset('data', data=dat)
    new_data.create_dataset('label_x4', data=label_x4)
    new_data.close()
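
# A quick sanity check of the merged file (a sketch, assuming the script above
# has been run and wrote both datasets):
#
#   with h5py.File('new.h5', 'r') as f:
#       assert f['data'].shape[0] == f['label_x4'].shape[0]
#       print(f['data'].shape, f['label_x4'].shape)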
from __future__ import annotations
from typing import *
import numpy as np #type: ignore
from enum import Enum
from DataLoader import LoadDatasets, SamplingMethod, Colour, TrainDataset, TestDataset
from VisualWords import MakeGrayPatchExtractor
from sklearn import cluster, metrics, neighbors #type: ignore
import itertools
import math
import os
import sys
import cmath
import numba #type: ignore
from numba import prange #type: ignore
import config
class ClusteringMethod(Enum):
kmeans = 0
OPTICS = 1
class SummaryPerformanceMetrics(NamedTuple):
Accuracy: float
Support: int
MacroAvgPrecision: float
MacroAvgRecall: float
MacroAvgf1: float
MacroAvgSupport:int
WeightAvgPrecision: float
WeightedAvgRecall: float
WeightedAvgf1: float
WeightedAvgSupport: int
    def __str__(self):
        return (
            "Summary Performance Metrics:"
            f"\n\tAccuracy: {self.Accuracy}"
            f"\n\tSupport: {self.Support}"
            f"\n\tMacro average precision: {self.MacroAvgPrecision}"
            f"\n\tMacro average recall: {self.MacroAvgRecall}"
            f"\n\tMacro average f1 score: {self.MacroAvgf1}"
            f"\n\tMacro average support: {self.MacroAvgSupport}"
            f"\n\tWeighted average precision: {self.WeightAvgPrecision}"
            f"\n\tWeighted average recall: {self.WeightedAvgRecall}"
            f"\n\tWeighted average f1 score: {self.WeightedAvgf1}"
            f"\n\tWeighted average support: {self.WeightedAvgSupport}"
        )
class FeatureExtractors(Enum):
Gray_Patches = 0
Colour_Patches = 1
Gray_SIFT = 2
Colour_SIFT = 3
Gray_SIFT_Sparse = 4
@numba.jit(nopython=True)
def Expectation(P_TopicGivenWordDocument, P_TopicGivenDocument, P_WordGivenTopic):
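    """E-step of pLSA: for every (topic k, document i, word j) compute the
    posterior

        P(z_k | d_i, w_j) = P(w_j | z_k) * P(z_k | d_i)
                            / sum_k' P(w_j | z_k') * P(z_k' | d_i)

    in place in P_TopicGivenWordDocument (axes: topic, document, word). A zero
    denominator is mapped to an all-zero posterior to avoid NaNs. Note that
    prange falls back to a plain range here because the jit decorator does not
    set parallel=True.
    """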
NTopics = P_TopicGivenDocument.shape[0]
NWords = P_WordGivenTopic.shape[0]
NDocs = P_TopicGivenDocument.shape[1]
for i in prange(NDocs):
for j in range(NWords):
divisor = 0
for k in range(NTopics):
divisor += P_WordGivenTopic[j,k]*P_TopicGivenDocument[k,i]
if divisor == 0:
for k in range(NTopics):
P_TopicGivenWordDocument[k,i,j] = 0
else:
for k in range(NTopics):
P_TopicGivenWordDocument[k,i,j] = P_WordGivenTopic[j,k]*P_TopicGivenDocument[k,i]/divisor
return P_TopicGivenWordDocument
@numba.jit(nopython=True)
def Maximize_TopicGivenDocument(Old_P_TopicGivenDocument, Co_OccurenceTable, P_TopicGivenDocumentWord):
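    """M-step update of the document-topic distribution:

        P(z_k | d_i) = (1 / n(d_i)) * sum_j n(w_j, d_i) * P(z_k | d_i, w_j)

    where n(w_j, d_i) is the word-document co-occurrence count and
    n(d_i) = sum_j n(w_j, d_i). The result is written in place into
    Old_P_TopicGivenDocument (axes: topic, document).
    """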
NTopics = P_TopicGivenDocumentWord.shape[0]
NWords = P_TopicGivenDocumentWord.shape[2]
NDocs = P_TopicGivenDocumentWord.shape[1]
N = np.sum(Co_OccurenceTable, axis=0)
#print(Old_P_TopicGivenDocument.shape)
#print(P_TopicGivenDocumentWord.shape)
#print(f"NTopcis:{NTopics}")
#print(f"NWords:{NWords}")
#print(f"NDocs:{NDocs}")
for i in prange(NDocs):
for k in range(NTopics):
Old_P_TopicGivenDocument[k,i] = 0
for j in range(NWords):
Old_P_TopicGivenDocument[k,i] += Co_OccurenceTable[j,i]*P_TopicGivenDocumentWord[k,i,j]
            Old_P_TopicGivenDocument[k,i] /= N[i]
return Old_P_TopicGivenDocument
@numba.jit(nopython=True)
def Maximize_WordGivenTopic(Old_P_WordGivenTopic, Co_OccurenceTable, P_TopicGivenDocumentWord, NormalizationFactors):
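    """M-step update of the topic-word distribution:

        P(w_j | z_k) = sum_i n(w_j, d_i) * P(z_k | d_i, w_j)
                       / sum_m sum_i n(w_m, d_i) * P(z_k | d_i, w_m)

    The per-topic denominators are accumulated once into NormalizationFactors;
    a zero factor yields an all-zero column to avoid division by zero. The
    result is written in place into Old_P_WordGivenTopic (axes: word, topic).
    """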
NTopics = P_TopicGivenDocumentWord.shape[0]
NWords = P_TopicGivenDocumentWord.shape[2]
NDocs = P_TopicGivenDocumentWord.shape[1]
for k in range(NTopics):
NormalizationFactors[k] = 0
for m in prange(NWords):
for i in range(NDocs):
for k in range(NTopics):
NormalizationFactors[k] += Co_OccurenceTable[m,i]*P_TopicGivenDocumentWord[k,i,m]
for j in prange(NWords):
for k in range(NTopics):
partial_sum = 0
if NormalizationFactors[k] == 0:
                Old_P_WordGivenTopic[j,k] = 0
continue
for i in range(NDocs):
partial_sum += Co_OccurenceTable[j,i]*P_TopicGivenDocumentWord[k,i,j]
            Old_P_WordGivenTopic[j,k] = partial_sum/NormalizationFactors[k]
    return Old_P_WordGivenTopic
@numba.jit(nopython=True)
def logLiklihood_jit(P_TopicGivenDocument, Co_OccurenceTable, P_TopicGivenDocumentWord, P_WordGivenTopic) -> float:
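    """Expected complete-data log-likelihood that EM maximizes:

        Q = sum_i sum_j n(w_j, d_i) * sum_k P(z_k | d_i, w_j)
                                      * log( P(w_j | z_k) * P(z_k | d_i) )

    0 * log 0 terms evaluate to NaN and are skipped by the isnan guard.
    """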
ll = 0.0
NTopics = P_TopicGivenDocumentWord.shape[0]
NWords = P_TopicGivenDocumentWord.shape[2]
NDocs = P_TopicGivenDocumentWord.shape[1]
for N in prange(NDocs):
for M in range(NWords):
partial_sum = 0.0
for K in range(NTopics):
Nan_test = P_TopicGivenDocumentWord[K, N, M]*np.log(P_WordGivenTopic[M, K] * P_TopicGivenDocument[K,N])
if not cmath.isnan(Nan_test):
partial_sum += Nan_test
            ll += partial_sum * Co_OccurenceTable[M, N]  # table is indexed (word, document)
return ll
class PLSA:
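    """Scene classification with a bag-of-visual-words pLSA topic model.

    Pipeline: extract gray patch descriptors from each training image, cluster
    them into NumVisualWords visual words, build the word-document
    co-occurrence table, and fit the topic model P(w|z), P(z|d) with EM. A
    k-nearest-neighbour classifier (self.KNNClassifier) is used downstream to
    classify images by their inferred topic distributions.
    """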
def __init__(self,
BaseImageListPath:str = "./dataset/filelist/places365_train_standard.txt",
BaseDatasetPath:str="./dataset/data",
NumVisualWords: int=1500,
NumTopics: int=25,
NumNeighbors:int = 10,
NumCategories:int = 365,
TrainSize:int = 1000,
TestSize: int = 1000,
SamplingMethod = SamplingMethod.Deterministic,
ImageColour = Colour.GRAY,
ImageTransform: Any = None,
Eps = 0.000001,
MaxIter = 1000
):
self.SamplingMethod = SamplingMethod
self.NumCategories = NumCategories
self.NumVisualWords = NumVisualWords
self.NumTopics = NumTopics
self.NumNeighbors = NumNeighbors
self.Eps = Eps
self.MaxIter = MaxIter
self.TrainingDataset: TrainDataset
self.TestingDataset: TestDataset
self.TrainingDataset, self.TestingDataset = LoadDatasets(BaseImageListPath, BaseDatasetPath, TrainSize, TestSize, NumCategories, SamplingMethod, ImageColour, ImageTransform)
self.Extractor = MakeGrayPatchExtractor()
self.KNNClassifier = neighbors.KNeighborsClassifier(n_neighbors=NumNeighbors)
self.ImageLabels = np.zeros((1,1))
self.WordCenters = np.zeros((1,1))
# Axis 0 - Number of unique words
# Axis 1 - Number of topics
self.P_WordGivenTopic = np.full((NumVisualWords, NumTopics), 1/NumVisualWords, dtype="float64")
#A uniform distribution is assumed at first so no additional normalization is required
def train(self):
print("Beginning training")
AllVisualWords = np.zeros((1,1))
NumberOfImageFeatures = []
self.ImageLabels = np.zeros((len(self.TrainingDataset),))
numFeatures = -1
feature_idx = 0
image_idx = 0
print("Extracting all image features")
for img in self.TrainingDataset:
vWords = self.Extractor(img[0])
self.ImageLabels[image_idx] = img[1]
NumberOfImageFeatures.append(vWords.NumFeatureVecs)
if numFeatures == -1:
totalNumberOfFeatures = vWords.NumFeatureVecs * len(self.TrainingDataset)
numFeatures = vWords.NumFeatureVecs
AllVisualWords = np.zeros((totalNumberOfFeatures, vWords.FeatureDim))
if AllVisualWords.shape[0] < feature_idx + vWords.NumFeatureVecs:
AllVisualWords = np.pad(AllVisualWords,((0,vWords.NumFeatureVecs*(len(self.TrainingDataset)-image_idx)), (0,0)))
for feature in vWords:
try:
AllVisualWords[feature_idx, :] = feature
feature_idx += 1
except Exception as e:
print(f"On the {image_idx}th image out of {len(self.TrainingDataset)}")
print(f"There are {numFeatures} per image\n On the {feature_idx}th feature of the image")
print(e)
exit(1)
image_idx += 1
print("Completed feature extraction, beginning cluster visual words")
labels = self.cluster(AllVisualWords[0:feature_idx,:])
print("Done clustering beginning EM")
img_index = 0
img_word_index = 0
Co_OccurenceTable = np.zeros((self.NumVisualWords, len(self.TrainingDataset)), dtype="uint32")
for label in labels:
Co_OccurenceTable[label, img_index] += 1
img_word_index += 1
if img_word_index == NumberOfImageFeatures[img_index]:
img_word_index = 0
img_index += 1
# Axis 0 - Number of topics
# Axis 1 - Length of training set or number of documents
P_TopicGivenDocument = np.random.random((self.NumTopics, len(self.TrainingDataset)))
#Normalize the conditional distribution
P_TopicGivenDocument = P_TopicGivenDocument/np.sum(P_TopicGivenDocument, axis=0)
# Axis 0 - number of topics
# Axis 1 - number of documents
# Axis 2 - number of words
P_TopicGivenDocumentWord = np.random.random((self.NumTopics, len(self.TrainingDataset), self.NumVisualWords))
P_TopicGivenDocumentWord = P_TopicGivenDocumentWord/ np.sum(P_TopicGivenDocumentWord, axis = 0)
#Beginning EM maximization
NormalizationFactors = np.zeros((self.NumTopics,), dtype="float64")
Old_logLiklihood = -sys.float_info.max
New_logLiklihood = 0.0
for iteration in range(self.MaxIter):
New_logLiklihood = logLiklihood_jit(P_TopicGivenDocument, Co_OccurenceTable, P_TopicGivenDocumentWord, self.P_WordGivenTopic)
if New_logLiklihood - Old_logLiklihood < self.Eps:
break
print(f"\t[{iteration}]: delta= {New_logLiklihood - Old_logLiklihood}")
Old_logLiklihood = New_logLiklihood
#Expectation portion
P_TopicGivenDocumentWord = Expectation(P_TopicGivenDocumentWord, P_TopicGivenDocument, self.P_WordGivenTopic)
#Maximization portion
P_TopicGivenDocument = Maximize_TopicGivenDocument(P_TopicGivenDocument, Co_OccurenceTable, P_TopicGivenDocumentWord)
self.P_WordGivenTopic = Maximize_WordGivenTopic(self.P_WordGivenTopic, Co_OccurenceTable, P_TopicGivenDocumentWord, NormalizationFactors)
print("Done EM training KNN classifier")
self.KNNClassifier.fit(P_TopicGivenDocument.T, self.ImageLabels)
print("Done training")
def calculate_Z_vector(self, image):
vWords = self.Extractor(image)
#Z vector is equivalent to a portion of the topic specific distribution given the document
#the distribution of the words given the topics is the distribution that was fitted to the training data
        P_TopicGivenDocumentWord = np.random.random((self.NumTopics, 1, self.NumVisualWords))
import numpy as np
import math
import sys
import matplotlib.pyplot as plt
class initialGeometry():
def __init__(self, c, Mchord, typeSpacing, pitchSSrad, rotationPt):
self.c = c
self.Mchord = Mchord
self.typeSpacing = typeSpacing
self.pitchSSrad = pitchSSrad
self.rotationPt = rotationPt
self.panelEndPts = self.getPanelEndPts()
self.panelVortexPts = self.getPanelVortexPts()
self.panelEvalPts = self.getPanelEvalPts()
self.panelNormVec = self.getPanelNormVec()
self.panelTangVec = self.getPanelTangVec()
self.panelArea = self.getPanelArea()
self.checkPanelNormVectorOrthogonality = self.getCheckPanelNormVectorOrthogonality()
self.checkPanelNormVectorMagnitude = self.getCheckPanelNormVectorMagnitude()
def getPanelEndPts(self):
panelEndPts = np.zeros((self.Mchord+1,2))
thetaSpacing = np.linspace(0,180,self.Mchord+1)
rotationMatrix = np.array([[math.cos(self.pitchSSrad), math.sin(self.pitchSSrad)],[-math.sin(self.pitchSSrad), math.cos(self.pitchSSrad)]])
# Computation of panelEndPts x-component
if self.typeSpacing == "uniform":
panelEndPts[:,0] = np.linspace(0,self.c,self.Mchord+1) - self.rotationPt
elif self.typeSpacing == "cosine":
for i in range(self.Mchord+1):
panelEndPts[i,0] = 0.5*self.c*(1-math.cos(math.radians(thetaSpacing[i]))) - self.rotationPt
else:
sys.exit('typeSpacing must be "uniform" or "cosine"')
# Rotation of panelEndPts by pitchSS angle
for i in range(self.Mchord+1):
panelEndPts[i,:] = np.matmul(rotationMatrix, panelEndPts[i,:])
return panelEndPts
def getPanelVortexPts(self):
panelVortexPts = np.zeros((self.Mchord,2))
for i in range(self.Mchord):
vec = self.panelEndPts[i+1,:] - self.panelEndPts[i,:]
panelVortexPts[i,:] = self.panelEndPts[i,:] + 0.25*vec
return panelVortexPts
def getPanelEvalPts(self):
panelEvalPts = np.zeros((self.Mchord,2))
for i in range(self.Mchord):
vec = self.panelEndPts[i+1,:] - self.panelEndPts[i,:]
panelEvalPts[i,:] = self.panelEndPts[i,:] + 0.75*vec
return panelEvalPts
def getPanelNormVec(self):
panelNormVec = np.zeros((self.Mchord,2))
thetaRotation = np.pi/2
transformationMatrix = np.array([[math.cos(thetaRotation), -math.sin(thetaRotation)],[math.sin(thetaRotation), math.cos(thetaRotation)]])
for i in range(self.Mchord):
vec = self.panelEndPts[i+1,:] - self.panelEndPts[i,:] # this vector is rotated by pi/2 in the counter-clockwise direction
panelNormVec[i,:] = ( np.matmul(transformationMatrix, vec) ) / np.linalg.norm(vec)
return panelNormVec
def getPanelTangVec(self):
panelTangVec = np.zeros((self.Mchord,2))
for i in range(self.Mchord):
vec = self.panelEndPts[i+1,:] - self.panelEndPts[i,:]
panelTangVec[i,:] = vec / np.linalg.norm(vec)
return panelTangVec
def getPanelArea(self):
panelArea = np.zeros(self.Mchord)
for i in range(self.Mchord):
vec = self.panelEndPts[i+1,:] - self.panelEndPts[i,:]
panelArea[i] = np.linalg.norm(vec)
return panelArea
def getCheckPanelNormVectorOrthogonality(self):
checkPanelNormVectorOrthogonality = np.zeros(self.Mchord)
for i in range(self.Mchord):
vec = self.panelEndPts[i+1,:] - self.panelEndPts[i,:]
checkPanelNormVectorOrthogonality[i] = np.vdot(vec,self.panelNormVec[i,:])
return checkPanelNormVectorOrthogonality
def getCheckPanelNormVectorMagnitude(self):
        checkPanelNormVectorMagnitude = np.zeros(self.Mchord)
        for i in range(self.Mchord):
            checkPanelNormVectorMagnitude[i] = np.linalg.norm(self.panelNormVec[i,:])
        return checkPanelNormVectorMagnitude
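# A minimal usage sketch (the parameter values below are illustrative assumptions,
# not taken from any particular case): build a pitched flat plate with cosine panel
# spacing and print the sanity checks computed in the constructor.
if __name__ == "__main__":
    geom = initialGeometry(c=1.0, Mchord=20, typeSpacing="cosine",
                           pitchSSrad=math.radians(5.0), rotationPt=0.25)
    print(geom.panelArea.sum())  # panel lengths sum to the chord c
    print(np.max(np.abs(geom.checkPanelNormVectorOrthogonality)))  # ~0: normals orthogonal to panels
    print(np.max(np.abs(geom.checkPanelNormVectorMagnitude - 1)))  # ~0: unit-length normals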
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from seir.wrapper import MultiPopWrapper
from seir.utils import plot_solution
# read calibration data
actual_hospitalisations = pd.read_excel('data/calibration.xlsx', sheet_name='Hospitalisations')
actual_hospitalisations['Date'] = [pd.to_datetime(x).date() for x in actual_hospitalisations['Date']]
# TODO: should check if file is downloaded: if not, download, then use the downloaded file
actual_infections = pd.read_csv(
'https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_provincial_cumulative_timeline_confirmed.csv')
actual_infections.rename(columns={'date': 'Date', 'total': 'Cum. Confirmed'}, inplace=True)
actual_infections.index = pd.to_datetime(actual_infections['Date'], dayfirst=True)
actual_infections = actual_infections.resample('D').mean().ffill().reset_index()
actual_infections['Date'] = [pd.to_datetime(x, dayfirst=True).date() for x in actual_infections['Date']]
# TODO: should check if file is downloaded: if not, download, then use the downloaded file
reported_deaths = pd.read_csv(
'https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_timeline_deaths.csv')
reported_deaths.rename(columns={'date': 'Date'}, inplace=True)
reported_deaths['Date'] = [pd.to_datetime(x, dayfirst=True).date() for x in reported_deaths['Date']]
actual_deaths = reported_deaths.groupby('Date').report_id.count().reset_index()
actual_deaths.rename(columns={'report_id': 'Daily deaths'}, inplace=True)
actual_deaths.index = pd.to_datetime(actual_deaths['Date'])
actual_deaths = actual_deaths.resample('D').mean().fillna(0).reset_index()
actual_deaths['Cum. Deaths'] = np.cumsum(actual_deaths['Daily deaths'])
# variable parameters for front-end
asymptomatic_prop = 0.75 # 0.2-0.8
asymp_rel_infectivity = 0.5 # 0.3 - 1
asymp_prop_imported = 0.0 # 0 - 0.8
r0 = 2.6 # 1.5 - 5.5
lockdown_ratio = 0.6 # 0.25 - 0.8
imported_scale = 2.5 # 0.5 - 2
lockdown_period = 35 # 35, 42, 49, 56, 63, 70
social_distancing_ratio = 0.75 # 0.5-1
period_asymp = 2.3 # 8-12
period_mild_infect = 2.3 # 2-4
period_severe_infect = 2.3 # 2-4
period_severe_isolate = 6 - period_severe_infect
period_hosp_if_not_icu = 10 # 6-10
period_hosp_if_icu = 8 # 6-10
period_icu_if_recover = 10 # 8-12
period_icu_if_die = 6 # 3-7
mort_loading = 1.0 # 0.5 - 1.5
prop_mild_detected = 0.3 # 0.2 - 0.8
hosp_to_icu = 0.2133 # 0.1 - 0.4 (0.21330242 = Ferguson)
descr = 'asymp_' + str(asymptomatic_prop) + '_R0_' + str(r0) + '_imported_scale_' + str(
imported_scale) + '_lockdown_' + str(lockdown_ratio) + '_postlockdown_' + str(
social_distancing_ratio) + '_ICU_' + str(hosp_to_icu) + '_mort_' + str(mort_loading) + '_asympinf_' + str(
asymp_rel_infectivity)
full_descr = f'Baseline R0: {r0:.1f}, asymptomatic proportion: {asymptomatic_prop:.0%}, asymptomatic relative ' \
f'infectiousness {asymp_rel_infectivity:.0%}, {prop_mild_detected:.0%} of mild cases detected \n '
full_descr += f'Imported scaling factor {imported_scale:.2f}, asymptomatic proportion imported {asymp_prop_imported:.0%}\n '
full_descr += f'Lockdown period: {lockdown_period:,.0f}, R0 relative to baseline {lockdown_ratio:.0%} in lockdown,' \
f'{social_distancing_ratio:.0%} post-lockdown \n '
full_descr += f'Infectious days pre-isolation: {period_asymp} asymptomatic, {period_mild_infect} mild, {period_severe_infect} severe; severe isolation days pre-hospitalisation: {period_severe_isolate} \n'
full_descr += f'Hospital days: {period_hosp_if_not_icu} not critical, {period_hosp_if_icu} critical plus {period_icu_if_recover} in ICU if recover/{period_icu_if_die} if die \n'
full_descr += f'Proportion of hospitalised cases ending in ICU: {hosp_to_icu:.2%}, mortality loading {mort_loading:.0%}'
# get s0 from file:
df = pd.read_csv('data/Startpop_2density_0comorbidity.csv') # , index_col=0)
df['density'] = df['density'].map({'High': 'high', 'Low': 'low'})
df['label'] = df['age'].str.lower() + '_' + df['sex'].str.lower() + '_' + df['density'].str.lower()
df_dict = df[['label', 'Population']].to_dict()
s_0 = {df_dict['label'][i]: df_dict['Population'][i] for i in df_dict['label'].keys()}
# Ferguson et al. parameterisation
ferguson = {'0-9': [0.001, 0.05, 0.00002],
'10-19': [0.003, 0.05, 0.00006],
'20-29': [0.012, 0.05, 0.0003],
'30-39': [0.032, 0.05, 0.0008],
'40-49': [0.049, 0.063, 0.0015],
'50-59': [0.102, 0.122, 0.006],
'60-69': [0.166, 0.274, 0.022],
'70-79': [0.243, 0.432, 0.051],
'80+': [0.273, 0.709, 0.093]}
# work out deaths as % of those entering ICU
for key in ferguson:
# TODO: add this calc to the df, not to the lists.
ferguson[key].append(ferguson[key][2] / ferguson[key][1] / ferguson[key][0])
# age profile - calculate ICU transition adjustment
age_profile = df.groupby('age').Population.sum().reset_index()
ferguson_df = pd.DataFrame(ferguson).T.reset_index()
ferguson_df.rename(columns={'index': 'age', 0: 'symp_to_hosp', 1: 'hosp_to_icu', 2: 'symp_to_dead', 3: 'icu_to_dead'},
inplace=True)
age_profile['Proportion'] = age_profile['Population'] / age_profile['Population'].sum()
age_profile = age_profile.merge(ferguson_df[['age', 'symp_to_hosp', 'hosp_to_icu']], on='age')
age_profile['hosp'] = age_profile['Proportion'] * age_profile['symp_to_hosp']
age_profile['prop_hosp'] = age_profile['hosp'] / age_profile['hosp'].sum()
age_profile['overall_hosp_to_icu'] = age_profile['prop_hosp'] * age_profile['hosp_to_icu']
overall_hosp_to_icu = age_profile['overall_hosp_to_icu'].sum()
icu_adjustment = hosp_to_icu / overall_hosp_to_icu # ~1 when hosp_to_icu is == ferguson number
# hard-coded parameters
infectious_func = lambda t: 1 if t < 11 else (
1 - (1 - social_distancing_ratio) / 11 * (t - 11)) if 11 <= t < 22 else lockdown_ratio if 22 <= t < (
22 + lockdown_period) else social_distancing_ratio
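# Timeline encoded by infectious_func: full contact for the first 11 days, a linear
# ramp from 1 down to the social-distancing ratio over days 11-22, the (lower)
# lockdown ratio for `lockdown_period` days starting at day 22, and the post-lockdown
# social-distancing ratio thereafter.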
c = 1
s = 0.06 # proportion of imported cases below 60 that are severe (1-s are mild)
# scale of ferguson ratio for 60+ - setting to inverse value from ferguson means we assume 100% of cases 60+ are severe
scale = {'60-69': 1,
'70-79': 1/ferguson['70-79'][0],
'80+': 1/ferguson['80+'][0]}
a = 0.25
l = asymp_prop_imported / (1 - asymp_prop_imported)
x = c * imported_scale
imported_func = lambda t: {'0-9_male_high': [0.0101 * x * l * np.exp(a * t), 0.0101 * x * (1 - s) * np.exp(a * t),
0.0101 * x * s * np.exp(a * t), 0, 0, 0],
'10-19_male_high': [0.0101 * x * l * np.exp(a * t), 0.0101 * x * (1 - s) * np.exp(a * t),
0.0101 * x * s * np.exp(a * t), 0, 0, 0],
'20-29_male_high': [0.0657 * x * l * np.exp(a * t), 0.0657 * x * (1 - s) * np.exp(a * t),
0.0657 * x * s * np.exp(a * t), 0, 0, 0],
'30-39_male_high': [0.1768 * x * l * np.exp(a * t), 0.1768 * x * (1 - s) * np.exp(a * t),
0.1768 * x * s * np.exp(a * t), 0, 0, 0],
'40-49_male_high': [0.0960 * x * l * np.exp(a * t), 0.0960 * x * (1 - s) * np.exp(a * t),
0.0960 * x * s * np.exp(a * t), 0, 0, 0],
'50-59_male_high': [0.1717 * x * l * np.exp(a * t), 0.1717 * x * (1 - ferguson['50-59'][0]) * np.exp(a * t),
0.1717 * x * ferguson['50-59'][0] * np.exp(a * t), 0, 0, 0],
'60-69_male_high': [0.0758 * x * l * np.exp(a * t), 0.0758 * x * (1 - scale['60-69'] * ferguson['60-69'][0]) * np.exp(a * t), 0.0758 * x * scale['60-69'] * ferguson['60-69'][0] * np.exp(a * t), 0, 0, 0],
'70-79_male_high': [0.0202 * x * l * np.exp(a * t), 0.0202 * x * (1 - scale['70-79'] * ferguson['70-79'][0]) * np.exp(a * t), 0.0202 * x * scale['70-79'] * ferguson['70-79'][0] * np.exp(a * t), 0, 0, 0],
'80+_male_high': [0.0051 * x * l * np.exp(a * t), 0.0051 * x * (1 - scale['80+'] * ferguson['80+'][0]) * np.exp(a * t), 0.0051 * x * scale['80+'] * ferguson['80+'][0] * np.exp(a * t), 0, 0, 0],
                           '0-9_female_high': [0.0000 * x * l * np.exp(a * t), 0.0000 * x * (1 - s) * np.exp(a * t),
                                               0.0000 * x * s * np.exp(a * t), 0, 0, 0],
import sys
from copy import copy
from itertools import chain
from numpy import *
from scipy.signal import medfilt as MF
from scipy.stats import scoreatpercentile as sap
from numpy.random import normal, seed
from statsmodels.robust import mad
from george import GP
from george.kernels import ConstantKernel, Matern32Kernel, DotProductKernel, ExpSquaredKernel
from .core import *
from .lpf import *
from .extcore import *
from .lpfsd import LPFSD
class LPFSRR(LPFSD):
def __init__(self, passband, lctype='relative', use_ldtk=False, n_threads=1, night=2, pipeline='gc'):
super().__init__(passband, lctype, use_ldtk, n_threads, night, pipeline)
self.fluxes = asarray(self.fluxes)
self.fluxes_m = self.fluxes.mean(0)
self.fluxes /= self.fluxes_m
self.wn_estimates = array([sqrt(2) * mad(diff(f)) for f in self.fluxes])
self.priors = self.priors[:self._sgp]
# Update the priors using the external data modelling
# ---------------------------------------------------
self.priors[0] = NP(125.417380, 8e-5, 'tc') # 0 - Transit centre
self.priors[1] = NP(3.06785547, 4e-7, 'p') # 1 - Period
self.priors[2] = NP(4.17200000, 3e-2, 'rho') # 2 - Stellar density
self.priors[3] = NP(0.16100000, 2e-2, 'b') # 3 - Impact parameter
# Change the GP priors slightly
# -----------------------------
self.priors.extend([UP(-5, -1, 'log10_constant'),
UP(-5, -1, 'log10_ra_amplitude'),
UP(-5, 3, 'log10_ra_inv_scale')])
self.ps = PriorSet(self.priors)
self.prior_kw = NP(0.1707, 3.2e-4, 'kw', lims=(0.16,0.18))
# Use mock data
# -------------
if passband == 'nb_mock' and type(self) == LPFSRR:
self.create_mock_nb_dataset()
def set_pv_indices(self, sbl=None, swn=None):
self.ik2 = [self._sk2 + pbid for pbid in self.gpbids]
self.iq1 = [self._sq1 + pbid * 2 for pbid in self.gpbids]
self.iq2 = [self._sq2 + pbid * 2 for pbid in self.gpbids]
self.uq1 = np.unique(self.iq1)
self.uq2 = np.unique(self.iq2)
sbl = sbl if sbl is not None else self._sbl
self.ibcn = [sbl + ilc for ilc in range(self.nlc)]
if hasattr(self, '_sgp'):
self.slgp = s_[self._sgp:self._sgp + 3]
def setup_gp(self):
self.gp_inputs = array([self.airmass, self.rotang]).T
self.kernel = (ConstantKernel(1e-3 ** 2, ndim=2, axes=0) + DotProductKernel(ndim=2, axes=0)
+ ConstantKernel(1e-3 ** 2, ndim=2, axes=1) * ExpSquaredKernel(.01, ndim=2, axes=1))
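        # Kernel structure: a constant plus linear (dot-product) term in the first
        # input column (airmass) and a squared-exponential term in the second
        # column (rotator angle), matching the layout of gp_inputs above.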
self.gp = GP(self.kernel)
self.gp.compute(self.gp_inputs, yerr=5e-4)
def map_to_gp(self, pv):
log10_to_ln = 1. / log10(e)
gpp = zeros(3)
gpp[0] = 2 * pv[0] * log10_to_ln
gpp[1] = 2 * pv[1] * log10_to_ln
gpp[2] = -pv[2] * log10_to_ln
return gpp
def lnposterior(self, pv):
_k = median(sqrt(pv[self.ik2]))
return super().lnposterior(pv) + self.prior_kw.log(_k)
def compute_lc_model(self, pv, copy=False):
bl = self.compute_baseline(pv)
tr = self.compute_transit(pv)
self._wrk_lc[:] = bl[:,np.newaxis]*tr/tr.mean(0)
return self._wrk_lc if not copy else self._wrk_lc.copy()
def create_mock_nb_dataset(self):
tc, p, rho, b = 125.417380, 3.06785547, 4.17200000, 0.161
ks = np.full(self.npb, 0.171)
ks[1::3] = 0.170
ks[2::3] = 0.172
ks[[7, 13]] = 0.173
q1 = array([0.581, 0.582, 0.590, 0.567, 0.541, 0.528, 0.492, 0.490,
0.461, 0.440, 0.419, 0.382, 0.380, 0.368, 0.344, 0.328,
0.320, 0.308, 0.301, 0.292])
q2 = array([0.465, 0.461, 0.446, 0.442, 0.425, 0.427, 0.414, 0.409,
0.422, 0.402, 0.391, 0.381, 0.379, 0.373, 0.369, 0.365,
0.362, 0.360, 0.360, 0.358])
seed(0)
cam = normal(0, 0.03, self.nlc)
        ctm = normal(0, 0.08, self.nlc)
import math
import numpy as np
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import CharRNN
from data import TextDataset, TextConverter
# Assumption: AverageValueMeter (used below) comes from torchnet, its usual source.
from torchnet.meter import AverageValueMeter
class Trainer(object):
def __init__(self, args):
self.args = args
self.device = torch.device('cuda' if self.args.cuda else 'cpu')
self.convert = None
self.model = None
self.optimizer = None
self.criterion = self.get_loss
self.meter = AverageValueMeter()
self.train_loader = None
self.get_data()
self.get_model()
self.get_optimizer()
def get_data(self):
self.convert = TextConverter(self.args.txt, max_vocab=self.args.max_vocab)
dataset = TextDataset(self.args.txt, self.args.len, self.convert.text_to_arr)
self.train_loader = DataLoader(dataset, self.args.batch_size, shuffle=True, num_workers=self.args.num_workers)
def get_model(self):
self.model = CharRNN(self.convert.vocab_size, self.args.embed_dim, self.args.hidden_size, self.args.num_layers,
self.args.dropout, self.args.cuda).to(self.device)
if self.args.cuda:
cudnn.benchmark = True
def get_optimizer(self):
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
self.optimizer = ScheduledOptim(optimizer)
@staticmethod
def get_loss(score, label):
return nn.CrossEntropyLoss()(score, label.view(-1))
def save_checkpoint(self, epoch):
if (epoch + 1) % self.args.save_interval == 0:
model_out_path = self.args.save_file + "epoch_{}_model.pth".format(epoch + 1)
torch.save(self.model, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
def save(self):
model_out_path = self.args.save_file + "final_model.pth"
torch.save(self.model, model_out_path)
print("Final model saved to {}".format(model_out_path))
@staticmethod
def pick_top_n(predictions, top_n=5):
top_predict_prob, top_predict_label = torch.topk(predictions, top_n, 1)
top_predict_prob /= torch.sum(top_predict_prob)
top_predict_prob = top_predict_prob.squeeze(0).cpu().numpy()
top_predict_label = top_predict_label.squeeze(0).cpu().numpy()
        c = np.random.choice(top_predict_label, size=1, p=top_predict_prob)
        return c
"""Script containing a non-flow variant of the ring road environment."""
import numpy as np
import csv
import time
import random
import json
import gym
from scipy.optimize import fsolve
from collections import defaultdict
from gym.spaces import Box
from copy import deepcopy
from hbaselines.envs.mixed_autonomy.envs.utils import get_rl_accel
from hbaselines.envs.mixed_autonomy.envs.utils import v_eq_function
# the length of the individual vehicles
VEHICLE_LENGTH = 5.0
# a normalizing term for the vehicle headways
MAX_HEADWAY = 100.0
# a normalizing term for the vehicle speeds
MAX_SPEED = 10.0
class RingEnv(gym.Env):
"""Non-flow variant of the ring road environment.
Attributes
----------
initial_state : str or None
the initial state. Must be one of the following:
* None: in this case, vehicles are evenly distributed
* "random": in this case, vehicles are randomly placed with a minimum
gap between vehicles specified by "min_gap"
* str: A string that is not "random" is assumed to be a path to a json
file specifying initial vehicle positions and speeds
length : float
the length of the ring at the current time step
num_vehicles : int
total number of vehicles in the network
dt : float
seconds per simulation step
horizon : int
the environment time horizon, in steps
sims_per_step : int
the number of simulation steps per environment step
max_accel : float
scaling factor for the AV accelerations, in m/s^2
min_gap : float
the minimum allowable gap by all vehicles. This is used during the
failsafe computations.
gen_emission : bool
whether to generate the emission file
rl_ids : array_like
the indices of vehicles that are treated as automated, or RL, vehicles
num_rl : int
the number of automated, or RL, vehicles
warmup_steps : int
number of steps performed before the initialization of training during
a rollout
maddpg : bool
whether to use a variant that is compatible with MADDPG
t : int
number of simulation steps since the start of the current rollout
positions : array_like
positions of all vehicles in the network
speeds : array_like
speeds of all vehicles in the network
headways : array_like
bumper-to-bumper gaps of all vehicles in the network
accelerations : array_like
previous step accelerations by the individual vehicles
v0 : float
desirable velocity, in m/s
T : float
safe time headway, in s
a : float
max acceleration, in m/s2
b : float
comfortable deceleration, in m/s2
delta : float
acceleration exponent
s0 : float
linear jam distance, in m
noise : float
std dev of normal perturbation to the acceleration
decel : float
maximum desired deceleration
delay : float
delay in applying the action, in seconds. This is used by the failsafe
computation.
"""
def __init__(self,
length,
num_vehicles,
dt,
horizon,
sims_per_step,
max_accel=0.5,
min_gap=1.0,
gen_emission=False,
rl_ids=None,
warmup_steps=0,
initial_state=None,
maddpg=False,
obs_frames=5):
"""Instantiate the environment class.
Parameters
----------
length : float or [float, float]
the length of the ring if a float, and a range of [min, max] length
values that are sampled from during the reset procedure
num_vehicles : int
total number of vehicles in the network
dt : float
seconds per simulation step
horizon : int
the environment time horizon, in steps
sims_per_step : int
the number of simulation steps per environment step
max_accel : float
scaling factor for the AV accelerations, in m/s^2
min_gap : float
the minimum allowable gap by all vehicles. This is used during the
failsafe computations.
gen_emission : bool
whether to generate the emission file
rl_ids : list of int or None
the indices of vehicles that are treated as automated, or RL,
vehicles
warmup_steps : int
number of steps performed before the initialization of training
during a rollout
initial_state : str or None
the initial state. Must be one of the following:
* None: in this case, vehicles are evenly distributed
* "random": in this case, vehicles are randomly placed with a
minimum gap between vehicles specified by "min_gap"
* str: A string that is not "random" is assumed to be a path to a
json file specifying initial vehicle positions and speeds
maddpg : bool
whether to use a variant that is compatible with MADDPG
obs_frames : int
number of observation frames to use. Additional frames are
provided from previous time steps.
"""
self._length = length
# Load the initial state (if needed).
if isinstance(initial_state, str) and initial_state != "random":
with open(initial_state, "r") as fp:
self.initial_state = json.load(fp)
self._length = list(self.initial_state.keys())
else:
self.initial_state = initial_state
self.length = self._set_length(self._length)
self.num_vehicles = num_vehicles
self.dt = dt
self.horizon = horizon
self.sims_per_step = sims_per_step
self.max_accel = max_accel
self.min_gap = min_gap
self.gen_emission = gen_emission
self.num_rl = len(rl_ids) if rl_ids is not None else 0
        self.rl_ids = np.asarray(rl_ids) if rl_ids is not None else None
self.warmup_steps = warmup_steps
self.maddpg = maddpg
self.obs_frames = obs_frames
self._time_log = None
self._v_eq = 0.
self._mean_speeds = None
self._mean_accels = None
# observations from previous time steps
self._obs_history = defaultdict(list)
# simulation parameters
self.t = 0
self.positions, self.speeds = self._set_initial_state(
length=self.length,
num_vehicles=self.num_vehicles,
initial_state=self.initial_state,
min_gap=self.min_gap,
)
self.headways = self._compute_headway()
self.accelerations = np.array([0. for _ in range(num_vehicles)])
self._emission_data = []
# human-driver model parameters
self.v0 = 30
self.T = 1
self.a = 1.3
self.b = 2.0
self.delta = 4
self.s0 = 2
self.noise = 0.2
# failsafe parameters
self.decel = 4.5
self.delay = self.dt
@staticmethod
def _set_length(length):
"""Update the length of the ring road.
Parameters
----------
length : float or [float, float]
the length of the ring if a float, and a range of [min, max] length
values that are sampled from during the reset procedure
Returns
-------
float
the updated ring length
"""
if isinstance(length, list):
if len(length) == 2:
# if the range for the length term was defined by the length
# parameter
length = random.randint(length[0], length[1])
else:
# if the lengths to choose from were defined the initial_states
# parameter
length = int(random.choice(length))
return length
@staticmethod
def _set_initial_state(length, num_vehicles, initial_state, min_gap):
"""Choose an initial state for all vehicles in the network.
Parameters
----------
length : float
the length of the ring road
num_vehicles : int
number of vehicles in the network
initial_state : str or None or dict
            the initial state. See description in __init__.
        min_gap : float
            the minimum allowable bumper-to-bumper gap used for the initial
            vehicle placement, in m
Returns
-------
array_like
initial vehicle positions
array_like
initial vehicle speeds
"""
if initial_state is None:
# uniformly distributed vehicles
pos = np.arange(0, length, length / num_vehicles)
# no initial speed (0 m/s)
vel = np.array([0. for _ in range(num_vehicles)])
elif initial_state == "random":
# Choose random number not including a minimum gap.
pos = sorted(np.random.uniform(
low=0,
high=length - num_vehicles * (VEHICLE_LENGTH + min_gap),
size=(num_vehicles,)))
# Append to each position the min_gap value.
pos += (VEHICLE_LENGTH + min_gap) * np.arange(num_vehicles)
# no initial speed (0 m/s)
vel = np.array([0. for _ in range(num_vehicles)])
else:
# Choose from the available initial states.
pos_vel = random.choice(initial_state[str(length)])
pos = np.array([pv[0] for pv in pos_vel])
vel = np.array([pv[1] for pv in pos_vel])
return pos, vel
def _update_state(self, pos, vel, accel):
"""Update the positions and speeds of all vehicles.
Parameters
----------
pos : array_like
positions of all vehicles in the network
vel : array_like
speeds of all vehicles in the network
accel : array_like
accelerations of all vehicles in the network
Returns
-------
array_like
the updated vehicle positions
array_like
the updated vehicle speeds
"""
new_vel = vel + accel * self.dt
new_pos = np.mod(
pos + vel * self.dt + 0.5 * accel * self.dt ** 2, self.length)
return new_pos, new_vel
def _compute_headway(self):
"""Compute the current step headway for all vehicles."""
# compute the individual headways
headway = np.append(
self.positions[1:] - self.positions[:-1] - VEHICLE_LENGTH,
self.positions[0] - self.positions[-1] - VEHICLE_LENGTH)
# dealing with wraparound
headway[np.argmax(self.positions)] += self.length
return headway
def _get_accel(self, vel, h):
"""Compute the accelerations of individual vehicles.
The acceleration values are dictated by the Intelligent Driver Model
(IDM), which car-following parameters specified in __init__.
Parameters
----------
vel : array_like
speeds of all vehicles in the network
h : array_like
bumper-to-bumper gaps of all vehicles in the network
Returns
-------
array_like
vehicle accelerations
"""
lead_vel = np.append(vel[1:], vel[0])
s_star = self.s0 + np.clip(
vel * self.T + np.multiply(vel, vel - lead_vel) /
(2 * np.sqrt(self.a * self.b)),
a_min=0,
a_max=np.inf,
)
accel = self.a * (
1 - np.power(vel/self.v0, self.delta) - np.power(s_star/h, 2))
noise = np.random.normal(0, self.noise, self.num_vehicles)
accel_max = self._failsafe(np.arange(self.num_vehicles))
accel_min = - vel / self.dt
accel = np.clip(accel + noise, a_max=accel_max, a_min=accel_min)
return accel
def _failsafe(self, veh_ids):
"""Compute the failsafe maximum acceleration.
Parameters
----------
veh_ids : array_like
the IDs of vehicles whose failsafe actions should be computed
Returns
-------
array_like
maximum accelerations
"""
lead_vel = self.speeds[(veh_ids + 1) % self.num_vehicles]
h = self.headways[veh_ids]
# how much we can reduce the speed in each time step
speed_reduction = self.decel * self.dt
# how many steps to get the speed to zero
steps_to_zero = np.round(lead_vel / speed_reduction)
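        # Distance the leader covers while braking to a stop: the arithmetic series
        # dt * sum_{k=1..n} (v - k*speed_reduction) = dt * (n*v - 0.5*speed_reduction*n*(n+1)).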
brake_distance = self.dt * (
np.multiply(steps_to_zero, lead_vel) -
0.5 * speed_reduction * np.multiply(steps_to_zero, steps_to_zero+1)
)
brake_distance = h + brake_distance - self.min_gap
indx_nonzero = brake_distance > 0
brake_distance = brake_distance[indx_nonzero]
v_safe = np.zeros(len(veh_ids))
s = self.dt
t = self.delay
# h = the distance that would be covered if it were possible to
# stop exactly after gap and decelerate with max_deaccel every
# simulation step
sqrt_quantity = np.sqrt(
((s * s)
+ (4.0 * ((s * (2.0 * brake_distance / speed_reduction - t))
+ (t * t))))) * -0.5
n = np.floor(.5 - ((t + sqrt_quantity) / s))
h = 0.5 * n * (n-1) * speed_reduction * s + n * speed_reduction * t
assert all(h <= brake_distance + 1e-6)
# compute the additional speed that must be used during deceleration to
# fix the discrepancy between g and h
r = (brake_distance - h) / (n * s + t)
x = n * speed_reduction + r
assert all(x >= 0)
v_safe[indx_nonzero] = x
max_accel = (v_safe - self.speeds[veh_ids]) / self.dt
return max_accel
def get_state(self):
"""Compute the environment reward.
This is defined by the child classes.
"""
return []
def compute_reward(self, action):
"""Compute the environment reward.
This is defined by the child classes.
"""
return 0
def step(self, action):
"""Advance the simulation by one step."""
collision = False
done = False
for _ in range(self.sims_per_step):
self.t += 1
# Compute the accelerations.
self.accelerations = self._get_accel(self.speeds, self.headways)
if self.rl_ids is not None and action is not None:
# Compute the accelerations for RL vehicles.
self.accelerations[self.rl_ids] = get_rl_accel(
accel=action,
vel=self.speeds[self.rl_ids],
max_accel=self.max_accel,
dt=self.dt,
)
# Clip by safe, non-negative bounds.
accel_min = - self.speeds[self.rl_ids] / self.dt
accel_max = self._failsafe(self.rl_ids)
self.accelerations[self.rl_ids] = np.clip(
self.accelerations[self.rl_ids],
a_max=accel_max,
a_min=accel_min)
# Update the speeds, positions, and headways.
self.positions, self.speeds = self._update_state(
pos=self.positions,
vel=self.speeds,
accel=self.accelerations,
)
self.headways = self._compute_headway()
if self.gen_emission:
data = {"t": self.t}
data.update({
"pos_{}".format(i): self.positions[i]
for i in range(self.num_vehicles)
})
data.update({
"vel_{}".format(i): self.speeds[i]
for i in range(self.num_vehicles)
})
self._emission_data.append(data)
# Determine whether the rollout is done.
collision = any(self.headways < 0)
done = (self.t >= (self.warmup_steps + self.horizon)
* self.sims_per_step) or collision
if done:
break
if collision:
print("Collision")
info = {}
if self.t > self.warmup_steps * self.sims_per_step:
            speed = np.mean(self.speeds)
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for opening arms until they are not in contact with a prop."""
import contextlib
from dm_control.mujoco.wrapper import mjbindings
import numpy as np
_MAX_IK_ATTEMPTS = 100
_IK_MAX_CORRECTION_WEIGHT = 0.1
_JOINT_LIMIT_TOLERANCE = 1e-4
_GAP_TOLERANCE = 0.1
class _ArmPropContactRemover(object):
"""Helper class for removing contacts between an arm and a prop via IK."""
def __init__(self, physics, arm_root, prop, gap):
arm_geoms = arm_root.find_all('geom')
self._arm_geom_ids = set(physics.bind(arm_geoms).element_id)
arm_joints = arm_root.find_all('joint')
self._arm_joint_ids = list(physics.bind(arm_joints).element_id)
self._arm_qpos_indices = physics.model.jnt_qposadr[self._arm_joint_ids]
self._arm_dof_indices = physics.model.jnt_dofadr[self._arm_joint_ids]
self._prop_geoms = prop.find_all('geom')
self._prop_geom_ids = set(physics.bind(self._prop_geoms).element_id)
self._arm_joint_min = np.full(len(self._arm_joint_ids), float('-inf'),
dtype=physics.model.jnt_range.dtype)
self._arm_joint_max = np.full(len(self._arm_joint_ids), float('inf'),
dtype=physics.model.jnt_range.dtype)
for i, joint_id in enumerate(self._arm_joint_ids):
if physics.model.jnt_limited[joint_id]:
self._arm_joint_min[i], self._arm_joint_max[i] = (
physics.model.jnt_range[joint_id])
self._gap = gap
def _contact_pair_is_relevant(self, contact):
set1 = self._arm_geom_ids
set2 = self._prop_geom_ids
return ((contact.geom1 in set1 and contact.geom2 in set2) or
(contact.geom2 in set1 and contact.geom1 in set2))
def _forward_and_find_next_contact(self, physics):
"""Forwards the physics and finds the next contact to handle."""
physics.forward()
next_contact = None
for contact in physics.data.contact:
if (self._contact_pair_is_relevant(contact) and
(next_contact is None or contact.dist < next_contact.dist)):
next_contact = contact
return next_contact
def _remove_contact_ik_iteration(self, physics, contact):
"""Performs one linearized IK iteration to remove the specified contact."""
if contact.geom1 in self._arm_geom_ids:
sign = -1
geom_id = contact.geom1
else:
sign = 1
geom_id = contact.geom2
body_id = physics.model.geom_bodyid[geom_id]
normal = sign * contact.frame[:3]
jac_dtype = physics.data.qpos.dtype
jac = np.empty((6, physics.model.nv), dtype=jac_dtype)
jac_pos, jac_rot = jac[:3], jac[3:]
mjbindings.mjlib.mj_jacPointAxis(
physics.model.ptr, physics.data.ptr,
jac_pos, jac_rot,
contact.pos + (contact.dist / 2) * normal, normal, body_id)
# Calculate corrections w.r.t. all joints, disregarding joint limits.
delta_xpos = normal * max(0, self._gap - contact.dist)
jac_all_joints = jac_pos[:, self._arm_dof_indices]
update_unfiltered = np.linalg.lstsq(
jac_all_joints, delta_xpos, rcond=None)[0]
# Filter out joints at limit that are corrected in the "wrong" direction.
        initial_qpos = np.array(physics.data.qpos[self._arm_qpos_indices])
import copy
from typing import Any
import numpy as np
import pytest
from neuraxle.base import BaseStep
from neuraxle.steps.column_transformer import ColumnTransformer
class MultiplyBy2(BaseStep):
def __init__(self):
BaseStep.__init__(self)
self.fitted_data = []
def fit(self, data_inputs, expected_outputs=None) -> 'BaseStep':
self._add_fitted_data(data_inputs, expected_outputs)
return self
def fit_transform(self, data_inputs, expected_outputs=None) -> ('BaseStep', Any):
self._add_fitted_data(data_inputs, expected_outputs)
return self, self.transform(data_inputs)
def _add_fitted_data(self, data_inputs, expected_outputs):
self.fitted_data.append((copy.deepcopy(data_inputs), copy.deepcopy(expected_outputs)))
def transform(self, data_inputs):
return (2 * np.array(data_inputs)).tolist()
class ColumnChooserTestCase:
def __init__(
self,
data_inputs,
expected_processed_outputs,
column_transformer_tuple_list,
n_dimension,
expected_step_key=None,
expected_fitted_data=None,
expected_outputs=None
):
self.n_dimension = n_dimension
self.expected_step_key = expected_step_key
self.data_inputs = data_inputs
self.expected_outputs = expected_outputs
self.expected_fitted_data = expected_fitted_data
self.expected_processed_outputs = expected_processed_outputs
self.column_transformer_tuple_list = column_transformer_tuple_list
# 2d
test_case_index_int_2d = ColumnChooserTestCase(
data_inputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_outputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_processed_outputs=np.array([
[2],
[22],
[42]
]),
expected_fitted_data=[(
[[1], [11], [21]],
[[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23]]
)],
expected_step_key='1_MultiplyBy2',
column_transformer_tuple_list=[
(1, MultiplyBy2())
],
n_dimension=2
)
test_case_index_start_end_2d = ColumnChooserTestCase(
data_inputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_outputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_processed_outputs=np.array([
[0, 2],
[20, 22],
[40, 42]
]),
expected_fitted_data=[(
[[0, 1], [10, 11], [20, 21]],
[[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23]]
)],
expected_step_key='slice(0, 2, None)_MultiplyBy2',
column_transformer_tuple_list=[
(slice(0, 2), MultiplyBy2())
],
n_dimension=2
)
test_case_index_range_2d = ColumnChooserTestCase(
data_inputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_outputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_processed_outputs=np.array([
[0, 2],
[20, 22],
[40, 42]
]),
expected_fitted_data=[(
[[0, 1], [10, 11], [20, 21]],
[[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23]]
)],
expected_step_key='range(0, 2)_MultiplyBy2',
column_transformer_tuple_list=[
(range(2), MultiplyBy2())
],
n_dimension=2
)
test_case_index_start_2d = ColumnChooserTestCase(
data_inputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_outputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_processed_outputs=np.array([
[2, 4, 6],
[22, 24, 26],
[42, 44, 46]
]),
expected_fitted_data=[(
[[1, 2, 3], [11, 12, 13], [21, 22, 23]],
[[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23]]
)],
expected_step_key='slice(1, None, None)_MultiplyBy2',
column_transformer_tuple_list=[
(slice(1, None), MultiplyBy2())
],
n_dimension=2
)
test_case_index_end_2d = ColumnChooserTestCase(
data_inputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_outputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_processed_outputs=np.array([
[0, 2],
[20, 22],
[40, 42]
]),
expected_fitted_data=[(
[[0, 1], [10, 11], [20, 21]],
[[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23]]
)],
expected_step_key='slice(None, 2, None)_MultiplyBy2',
column_transformer_tuple_list=[
(slice(None, 2), MultiplyBy2())
],
n_dimension=2
)
test_case_index_last_2d = ColumnChooserTestCase(
data_inputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_outputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_processed_outputs=np.array([
[0, 2, 4],
[20, 22, 24],
[40, 42, 44]
]),
expected_fitted_data=[
(
[[0, 1, 2], [10, 11, 12], [20, 21, 22]],
[[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23]]
)
],
expected_step_key='slice(None, -1, None)_MultiplyBy2',
column_transformer_tuple_list=[
(slice(None, -1), MultiplyBy2())
],
n_dimension=2
)
test_case_list_of_columns_2d = ColumnChooserTestCase(
data_inputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_outputs=np.array([
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]
]),
expected_processed_outputs=np.array([
[0, 4],
[20, 24],
[40, 44]
]),
expected_fitted_data=[(
[[0, 2], [10, 12], [20, 22]],
[[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23]]
)],
expected_step_key='[0, 2]_MultiplyBy2',
column_transformer_tuple_list=[
([0, 2], MultiplyBy2())
],
n_dimension=2
)
# 3d
test_case_index_int = ColumnChooserTestCase(
data_inputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_outputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_processed_outputs=np.array([
[[2]],
[[22]],
[[42]]
]),
expected_fitted_data=[
([np.array([[1]]), np.array([[11]]), np.array([[21]])],
[np.array([[0, 1, 2, 3]]),
np.array([[10, 11, 12, 13]]),
np.array([[20, 21, 22, 23]])]
)
],
expected_step_key='1_MultiplyBy2',
column_transformer_tuple_list=[
(1, MultiplyBy2())
],
n_dimension=3
)
# test_case_index_int = ColumnChooserTestCase(
# data_inputs=np.array([[
# [0, 1, 2, 3],
# [10, 11, 12, 13],
# [20, 21, 22, 23]
# ]]),
# expected_outputs=np.array([[
# [0, 1, 2, 3],
# [10, 11, 12, 13],
# [20, 21, 22, 23]
# ]]),
# expected_processed_outputs=np.array([[
# [2],
# [22],
# [42]
# ]]),
# expected_fitted_data=[(
# [np.array([[1], [11], [21]])],
# [np.array([[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23]])]
# )],
# expected_step_key='1_MultiplyBy2',
# column_transformer_tuple_list=[
# (1, MultiplyBy2())
# ],
# n_dimension=3
# )
test_case_index_start_end = ColumnChooserTestCase(
data_inputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_outputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_processed_outputs=np.array([
[[0, 2]],
[[20, 22]],
[[40, 42]]
]),
expected_fitted_data=[(
[np.array([[0, 1]]), np.array([[10, 11]]), np.array([[20, 21]])],
[np.array([[0, 1, 2, 3]]),
np.array([[10, 11, 12, 13]]),
np.array([[20, 21, 22, 23]])]
)],
expected_step_key='slice(0, 2, None)_MultiplyBy2',
column_transformer_tuple_list=[
(slice(0, 2), MultiplyBy2())
],
n_dimension=3
)
test_case_index_range = ColumnChooserTestCase(
data_inputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_outputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_processed_outputs=np.array([
[[0, 2]],
[[20, 22]],
[[40, 42]]
]),
expected_fitted_data=[(
[np.array([[0, 1]]), np.array([[10, 11]]), np.array([[20, 21]])],
[np.array([[0, 1, 2, 3]]),
np.array([[10, 11, 12, 13]]),
np.array([[20, 21, 22, 23]])]
)],
expected_step_key='range(0, 2)_MultiplyBy2',
column_transformer_tuple_list=[
(range(2), MultiplyBy2())
],
n_dimension=3
)
test_case_index_start = ColumnChooserTestCase(
data_inputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_outputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_processed_outputs=np.array([
[[2, 4, 6]],
[[22, 24, 26]],
[[42, 44, 46]]
]),
expected_fitted_data=[(
[np.array([[1, 2, 3]]),
np.array([[11, 12, 13]]),
np.array([[21, 22, 23]])],
[np.array([[0, 1, 2, 3]]),
np.array([[10, 11, 12, 13]]),
np.array([[20, 21, 22, 23]])]
)],
expected_step_key='slice(1, None, None)_MultiplyBy2',
column_transformer_tuple_list=[
(slice(1, None), MultiplyBy2())
],
n_dimension=3
)
test_case_index_end = ColumnChooserTestCase(
data_inputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_outputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_processed_outputs=np.array([
[[0, 2]],
[[20, 22]],
[[40, 42]]
]),
expected_fitted_data=[(
[np.array([[0, 1]]),
np.array([[10, 11]]),
np.array([[20, 21]])],
[np.array([[0, 1, 2, 3]]),
np.array([[10, 11, 12, 13]]),
np.array([[20, 21, 22, 23]])]
)],
expected_step_key='slice(None, 2, None)_MultiplyBy2',
column_transformer_tuple_list=[
(slice(None, 2), MultiplyBy2())
],
n_dimension=3
)
test_case_index_last = ColumnChooserTestCase(
data_inputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_outputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_processed_outputs=np.array([
[[0, 2, 4]],
[[20, 22, 24]],
[[40, 42, 44]]
]),
expected_fitted_data=[
(
[np.array([[0, 1, 2]]),
np.array([[10, 11, 12]]),
np.array([[20, 21, 22]])],
[np.array([[0, 1, 2, 3]]),
np.array([[10, 11, 12, 13]]),
np.array([[20, 21, 22, 23]])]
)
],
expected_step_key='slice(None, -1, None)_MultiplyBy2',
column_transformer_tuple_list=[
(slice(None, -1), MultiplyBy2())
],
n_dimension=3
)
test_case_list_of_columns = ColumnChooserTestCase(
data_inputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_outputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_processed_outputs=np.array([
[[0, 4]],
[[20, 24]],
[[40, 44]]
]),
expected_fitted_data=[(
[np.array([[0, 2]]),
np.array([[10, 12]]),
np.array([[20, 22]])],
[np.array([[0, 1, 2, 3]]),
np.array([[10, 11, 12, 13]]),
np.array([[20, 21, 22, 23]])]
)],
expected_step_key='[0, 2]_MultiplyBy2',
column_transformer_tuple_list=[
([0, 2], MultiplyBy2())
],
n_dimension=3
)
@pytest.mark.parametrize("test_case", [
copy.deepcopy(test_case_index_int),
copy.deepcopy(test_case_index_start_end),
copy.deepcopy(test_case_index_start),
copy.deepcopy(test_case_index_range),
copy.deepcopy(test_case_index_end),
copy.deepcopy(test_case_index_last),
copy.deepcopy(test_case_list_of_columns),
copy.deepcopy(test_case_index_int_2d),
copy.deepcopy(test_case_index_start_end_2d),
copy.deepcopy(test_case_index_start_2d),
copy.deepcopy(test_case_index_range_2d),
copy.deepcopy(test_case_index_end_2d),
copy.deepcopy(test_case_index_last_2d),
copy.deepcopy(test_case_list_of_columns_2d)
])
def test_column_transformer_transform_should_support_indexes(test_case: ColumnChooserTestCase):
data_inputs = test_case.data_inputs
column_transformer = ColumnTransformer(test_case.column_transformer_tuple_list, test_case.n_dimension)
outputs = column_transformer.transform(data_inputs)
assert np.array_equal(outputs, test_case.expected_processed_outputs)
@pytest.mark.parametrize("test_case", [
copy.deepcopy(test_case_index_int),
copy.deepcopy(test_case_index_start_end),
copy.deepcopy(test_case_index_start),
copy.deepcopy(test_case_index_range),
copy.deepcopy(test_case_index_end),
copy.deepcopy(test_case_index_last),
copy.deepcopy(test_case_list_of_columns),
copy.deepcopy(test_case_index_int_2d),
copy.deepcopy(test_case_index_start_end_2d),
copy.deepcopy(test_case_index_start_2d),
copy.deepcopy(test_case_index_range_2d),
copy.deepcopy(test_case_index_end_2d),
copy.deepcopy(test_case_index_last_2d),
copy.deepcopy(test_case_list_of_columns_2d)
])
def test_column_transformer_fit_transform_should_support_indexes(test_case: ColumnChooserTestCase):
data_inputs = test_case.data_inputs
expected_outputs = test_case.expected_outputs
p = ColumnTransformer(test_case.column_transformer_tuple_list, test_case.n_dimension)
p, outputs = p.fit_transform(data_inputs, expected_outputs)
assert np.array_equal(outputs, test_case.expected_processed_outputs)
actual_fitted_data = p[test_case.expected_step_key]['MultiplyBy2'].fitted_data
expected_fitted_data = test_case.expected_fitted_data
assert_data_fitted_properly(actual_fitted_data, expected_fitted_data)
@pytest.mark.parametrize("test_case", [
copy.deepcopy(test_case_index_int),
copy.deepcopy(test_case_index_start_end),
copy.deepcopy(test_case_index_start),
copy.deepcopy(test_case_index_range),
copy.deepcopy(test_case_index_end),
copy.deepcopy(test_case_index_last),
copy.deepcopy(test_case_list_of_columns),
copy.deepcopy(test_case_index_int_2d),
copy.deepcopy(test_case_index_start_end_2d),
copy.deepcopy(test_case_index_start_2d),
copy.deepcopy(test_case_index_range_2d),
copy.deepcopy(test_case_index_end_2d),
copy.deepcopy(test_case_index_last_2d),
copy.deepcopy(test_case_list_of_columns_2d)
])
def test_column_transformer_fit_should_support_indexes(test_case: ColumnChooserTestCase):
data_inputs = test_case.data_inputs
p = ColumnTransformer(test_case.column_transformer_tuple_list, test_case.n_dimension)
p = p.fit(data_inputs, test_case.expected_outputs)
actual_fitted_data = p[test_case.expected_step_key]['MultiplyBy2'].fitted_data
expected_fitted_data = test_case.expected_fitted_data
assert_data_fitted_properly(actual_fitted_data, expected_fitted_data)
def test_column_transformer_fit_should_support_multiple_tuples():
# Given
test_case = ColumnChooserTestCase(
data_inputs=np.array([
[[1, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_outputs=np.array([
[[0, 1, 2, 3]],
[[10, 11, 12, 13]],
[[20, 21, 22, 23]]
]),
expected_processed_outputs=np.array([
[[2, 2, 2, 3]],
[[20, 22, 12, 13]],
[[40, 42, 44, 46]]
]),
column_transformer_tuple_list=[
(slice(0, 2), MultiplyBy2()),
(2, MultiplyBy2())
],
n_dimension=3
)
data_inputs = test_case.data_inputs
p = ColumnTransformer(test_case.column_transformer_tuple_list, test_case.n_dimension)
# When
p = p.fit(data_inputs, test_case.expected_outputs)
# Then
actual_fitted_data = p['2_MultiplyBy2']['MultiplyBy2'].fitted_data
expected_fitted_data = [
([np.array([[2]]), np.array([[12]]), np.array([[22]])],
         [np.array([[0, 1, 2, 3]]),
          np.array([[10, 11, 12, 13]]),
          np.array([[20, 21, 22, 23]])])]
    assert_data_fitted_properly(actual_fitted_data, expected_fitted_data)
"""
Sampler which uses a single centralized policy over a set of parallel environments.
This sampler is useful when using GPUs for the policy.
Used in POIS2.
"""
import time
import numpy as np
def traj_segment_generator(pi, env, n_episodes, horizon, stochastic, gamma):
"""
    Returns a generator of complete rollouts. It needs to be fed a vectorized
environment and a single policy.
"""
policy_time = 0
env_time = 0
# Initialize state variables
t = 0
ac = np.array([env.action_space.sample()] * env.num_envs)
_env_s = time.time()
ob = env.reset()
env_time += time.time() - _env_s
zero_ob = np.zeros(ob.shape)
current_indexes = np.arange(0, env.num_envs)
def filter_indexes(idx_vector, t_vector):
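        # Returns three parallel lists restricted to the active environments
        # (index != -1): the env slot i, the episode index v it writes to, and
        # its current timestep t.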
return map(list, zip(*[(i, v, t) for i,(v,t) in enumerate(zip(idx_vector, t_vector)) if v != -1]))
def has_ended(idx_vector):
return sum(idx_vector) == -len(idx_vector)
# Iterate to make yield continuous
while True:
_tt = time.time()
# Initialize history arrays
obs = np.array([[zero_ob[0] for _t in range(horizon)] for _e in range(n_episodes)])
rews = np.zeros((n_episodes, horizon), 'float32')
vpreds = np.zeros((n_episodes, horizon), 'float32')
news = np.zeros((n_episodes, horizon), 'int32')
acs = np.array([[ac[0] for _t in range(horizon)] for _e in range(n_episodes)])
prevacs = acs.copy()
mask = np.zeros((n_episodes, horizon), 'int32')
# Initialize indexes and timesteps
current_indexes = np.arange(0, env.num_envs)
current_timesteps = np.zeros((env.num_envs), dtype=np.int32)
# Set to -1 indexes if njobs > num_episodes
current_indexes[n_episodes:] = -1
# Indexes log: remember which indexes have been completed
indexes_log = list(current_indexes)
while not has_ended(current_indexes):
# Get the action and save the previous one
prevac = ac
_pi_s = time.time()
ac, vpred = pi.act(stochastic, ob)
policy_time += time.time() - _pi_s
# Filter the current indexes
ci_ob, ci_memory, ct = filter_indexes(current_indexes, current_timesteps)
# Save the current properties
obs[ci_memory, ct,:] = ob[ci_ob]
#vpreds[ci_memory, ct] = np.reshape(np.array(vpred), (-1,))[ci_ob]
acs[ci_memory, ct] = ac[ci_ob]
prevacs[ci_memory, ct] = prevac[ci_ob]
# Take the action
_env_s = time.time()
env.step_async(ac)
ob, rew, done, _ = env.step_wait()
env_time += time.time() - _env_s
# Save the reward
rews[ci_memory, ct] = rew[ci_ob]
mask[ci_memory, ct] = 1
news[ci_memory, ct] = np.reshape(np.array(done), (-1, ))[ci_ob]
# Update the indexes and timesteps
for i, d in enumerate(done):
if not d and current_timesteps[i] < (horizon-1):
current_timesteps[i] += 1
elif max(indexes_log) < n_episodes - 1:
current_timesteps[i] = 0 # Reset the timestep
current_indexes[i] = max(indexes_log) + 1 # Increment the index
indexes_log.append(current_indexes[i])
else:
current_indexes[i] = -1 # Disabling
# Add discounted reward (here is simpler)
gamma_log = np.log(np.full((horizon), gamma, dtype='float32'))
gamma_discounter = np.exp(np.cumsum(gamma_log))
discounted_reward = rews * gamma_discounter
total_time = time.time() - _tt
# Reshape to flatten episodes and yield
        yield {'ob': np.reshape(obs, (n_episodes * horizon,) + obs.shape[2:]),
"""
Module for adjusting viewing geometry using ISIS3. This module requires that
the ISIS3 executables are in your PATH.
"""
import numpy as np
import pandas as pd
import quaternion, argparse, os, subprocess
"""
Compute the observer position given a ground point and distance
parameters
----------
ground_point : array
The ground point that will be viewed in body fixed X, Y, Z
as a numpy array.
distance : float
The distance from the center of the body to the observer in kilometers.
returns
-------
position : array
The observer position in body fixed X, Y, Z as a numpy array.
"""
def compute_position(ground_point, distance):
bf_position = np.array(distance / np.linalg.norm(ground_point)) * ground_point
return bf_position
"""
Compute the shortest rotation between two vectors.
parameters
----------
u : array
The first vector that the rotation will rotate to v.
v : array
The second vector that the rotation will rotate u to.
fallback : array
When the input vectors are exactly opposite, there is not a unique
shortest rotation between them. A 180 degree rotation about any axis
will work. If this parameter is entered, then it will be used as the
axis of rotation. Otherwise, the cross product of u and either the
x-axis or y-axis will be used for the axis of rotation.
returns
-------
rotation : quaternion
The output rotation from u to v.
"""
def rotation_between(u, v, fallback=None):
tolerance = 1e-10
if np.linalg.norm(u) < tolerance or np.linalg.norm(v) < tolerance:
return np.quaternion(1,0,0,0)
unit_u = u / np.linalg.norm(u)
unit_v = v / np.linalg.norm(v)
    dot = np.dot(unit_u, unit_v)
# -*- coding: utf-8 -*-
from __future__ import print_function
import pandas as pd
import numpy as np
import math
import copy
import random
import glob
import os
unit_size = 5
feature_dim = 2048 + 1024
def iou_with_anchors(anchors_min, anchors_max, box_min, box_max):
"""Compute jaccard score between a box and the anchors.
"""
len_anchors = anchors_max - anchors_min
int_xmin = np.maximum(anchors_min, box_min)
int_xmax = np.minimum(anchors_max, box_max)
inter_len = np.maximum(int_xmax - int_xmin, 0.)
union_len = len_anchors - inter_len + box_max - box_min
# print inter_len,union_len
    jaccard = np.divide(inter_len, union_len)
    return jaccard
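# Quick sanity check (illustrative): an anchor [0, 10] against a box [5, 15]
# overlaps by 5 over a union of 15, so
# iou_with_anchors(np.array([0.]), np.array([10.]), 5., 15.) -> array([0.333...])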
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import numpy
from pyscf import lib
from pyscf.fci import cistring
from pyscf.fci.addons import _unpack_nelec
librdm = lib.load_library('libfci')
######################################################
# Spin squared operator
######################################################
# S^2 = (S+ * S- + S- * S+)/2 + Sz * Sz
# S+ = \sum_i S_i+ ~ effective for all beta occupied orbitals.
# S- = \sum_i S_i- ~ effective for all alpha occupied orbitals.
# There are two cases for S+*S-
# 1) same electron \sum_i s_i+*s_i-, <CI|s_i+*s_i-|CI> gives
# <p|s+s-|q> \gammalpha_qp = trace(\gammalpha) = neleca
# 2) different electrons for \sum s_i+*s_j- (i\neq j, n*(n-1) terms)
# As a two-particle operator S+*S-
# = <ij|s+s-|kl>Gamma_{ik,jl} = <iajb|s+s-|kbla>Gamma_{iakb,jbla}
# = <ia|s+|kb><jb|s-|la>Gamma_{iakb,jbla}
# <CI|S+*S-|CI> = neleca + <ia|s+|kb><jb|s-|la>Gamma_{iakb,jbla}
#
# There are two cases for S-*S+
# 1) same electron \sum_i s_i-*s_i+
# <p|s+s-|q> \gammabeta_qp = trace(\gammabeta) = nelecb
# 2) different electrons
# = <ij|s-s+|kl>Gamma_{ik,jl} = <ibja|s-s+|kalb>Gamma_{ibka,jalb}
# = <ib|s-|ka><ja|s+|lb>Gamma_{ibka,jalb}
# <CI|S-*S+|CI> = nelecb + <ib|s-|ka><ja|s+|lb>Gamma_{ibka,jalb}
#
# Sz*Sz = Msz^2 = (neleca-nelecb)^2
# 1) same electron
# <p|ss|q>\gamma_qp = <p|q>\gamma_qp = (neleca+nelecb)/4
# 2) different electrons
# <ij|2s1s2|kl>Gamma_{ik,jl}/2
# =(<ia|ka><ja|la>Gamma_{iaka,jala} - <ia|ka><jb|lb>Gamma_{iaka,jblb}
# - <ib|kb><ja|la>Gamma_{ibkb,jala} + <ib|kb><jb|lb>Gamma_{ibkb,jblb})/4
# set aolst for local spin expectation value, which is defined as
# <CI|ao><ao|S^2|CI>
# For a complete list of AOs, I = \sum |ao><ao|, it becomes <CI|S^2|CI>
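# Quick sanity check (illustrative, not from the original source): for a
# high-spin state with two unpaired alpha electrons (S = 1), the expressions
# above combine to <S^2> = S(S+1) = 2, i.e. a triplet with multiplicity
# 2S+1 = 3.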
def spin_square_general(dm1a, dm1b, dm2aa, dm2ab, dm2bb, mo_coeff, ovlp=1):
r'''General spin square operator.
    .. math::
<CI|S_+*S_-|CI> &= n_\alpha + \delta_{ik}\delta_{jl}Gamma_{i\alpha k\beta ,j\beta l\alpha } \\
<CI|S_-*S_+|CI> &= n_\beta + \delta_{ik}\delta_{jl}Gamma_{i\beta k\alpha ,j\alpha l\beta } \\
<CI|S_z*S_z|CI> &= \delta_{ik}\delta_{jl}(Gamma_{i\alpha k\alpha ,j\alpha l\alpha }
- Gamma_{i\alpha k\alpha ,j\beta l\beta }
- Gamma_{i\beta k\beta ,j\alpha l\alpha}
+ Gamma_{i\beta k\beta ,j\beta l\beta})
+ (n_\alpha+n_\beta)/4
    Given the overlap between non-degenerate alpha and beta orbitals, this
function can compute the expectation value spin square operator for
UHF-FCI wavefunction
'''
if isinstance(mo_coeff, numpy.ndarray) and mo_coeff.ndim == 2:
mo_coeff = (mo_coeff, mo_coeff)
# projected overlap matrix elements for partial trace
if isinstance(ovlp, numpy.ndarray):
ovlpaa = reduce(numpy.dot, (mo_coeff[0].T, ovlp, mo_coeff[0]))
ovlpbb = reduce(numpy.dot, (mo_coeff[1].T, ovlp, mo_coeff[1]))
ovlpab = reduce(numpy.dot, (mo_coeff[0].T, ovlp, mo_coeff[1]))
ovlpba = reduce(numpy.dot, (mo_coeff[1].T, ovlp, mo_coeff[0]))
else:
ovlpaa = numpy.dot(mo_coeff[0].T, mo_coeff[0])
ovlpbb = numpy.dot(mo_coeff[1].T, mo_coeff[1])
ovlpab = numpy.dot(mo_coeff[0].T, mo_coeff[1])
ovlpba = numpy.dot(mo_coeff[1].T, mo_coeff[0])
# if ovlp=1, ssz = (neleca-nelecb)**2 * .25
ssz = (numpy.einsum('ijkl,ij,kl->', dm2aa, ovlpaa, ovlpaa) -
numpy.einsum('ijkl,ij,kl->', dm2ab, ovlpaa, ovlpbb) +
numpy.einsum('ijkl,ij,kl->', dm2bb, ovlpbb, ovlpbb) -
numpy.einsum('ijkl,ij,kl->', dm2ab, ovlpaa, ovlpbb)) * .25
ssz += (numpy.einsum('ji,ij->', dm1a, ovlpaa) +
numpy.einsum('ji,ij->', dm1b, ovlpbb)) *.25
dm2abba = -dm2ab.transpose(0,3,2,1) # alpha^+ beta^+ alpha beta
dm2baab = -dm2ab.transpose(2,1,0,3) # beta^+ alpha^+ beta alpha
ssxy =(numpy.einsum('ijkl,ij,kl->', dm2baab, ovlpba, ovlpab) +
| numpy.einsum('ijkl,ij,kl->', dm2abba, ovlpab, ovlpba) | numpy.einsum |
import unittest
import numpy as np
from depth.convolution import convolve2d
class TestConvolution(unittest.TestCase):
def test_convolve2d(self):
data_tensor = np.ones((1, 1, 5, 5))
kernel_tensor = np.array([[
[0, 1, 0],
[0, 0, 0],
[0, -1, 0]]])
result = np.array([[[
[-1, -1, -1, -1, -1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
]]])
convolution = convolve2d(data_tensor, kernel_tensor)
self.assertTrue( | np.array_equal(convolution, result) | numpy.array_equal |
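# Note (illustrative aside, assuming convolve2d zero-pads to keep the 'same'
# output size): the -1/+1 rows appear only at the borders, where one of the
# two kernel taps falls outside the image and contributes nothing; in the
# interior the +1 and -1 taps see identical values and cancel to 0.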
import numpy as np
from mayavi import mlab
from mayavi.mlab import points3d, plot3d
from collections import namedtuple
import tqdm
OrbitConfig = namedtuple("OrbitConfig", ["mu",
"planet_radius",
"cross_section_area",
"drag_coefficient",
"density_function",
"ship_mass"])
class SimulateOrbit:
def __init__(self, config):
self.mu = config.mu
self.planet_radius = config.planet_radius
self.cross_section_area = config.cross_section_area
self.drag_coefficient = config.drag_coefficient
self.density_function = config.density_function
self.ship_mass = config.ship_mass
def run_simulation(self, start_position, start_velocity, h=1, time=100000):
position = np.array(start_position)
velocity = | np.array(start_velocity) | numpy.array |
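# Hedged sketch (assumed, since run_simulation is truncated above): a typical
# acceleration model for this setup combines inverse-square gravity with an
# atmospheric drag term opposing the velocity. The attribute names below
# mirror the class; the helper itself is hypothetical.
# def _acceleration(self, position, velocity):
#     r = np.linalg.norm(position)
#     gravity = -self.mu * position / r**3
#     rho = self.density_function(r - self.planet_radius)
#     speed = np.linalg.norm(velocity)
#     drag = -0.5 * rho * self.drag_coefficient * self.cross_section_area * \
#         speed * velocity / self.ship_mass
#     return gravity + drag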
### Cross comparison m2 & b5.
import geodata as gd
import numpy as np
import matplotlib as mpl
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import cartopy.crs as ccrs
import pystare as ps
from netCDF4 import Dataset
### MERRA2
m2_dataPath = "/home/mrilee/data/"
m2_dataFile = "MERRA2_300.tavg1_2d_slv_Nx.20051215.nc4"
m2_fqFilename = m2_dataPath+m2_dataFile
m2_ds = Dataset(m2_fqFilename)
print('keys: ',m2_ds.variables.keys())
m2_dLat = 0.5
m2_dLon = 5.0/8.0
m2_dLatkm = m2_dLat * gd.re_km/gd.deg_per_rad
m2_dLonkm = m2_dLon * gd.re_km/gd.deg_per_rad
m2_lat0=0
m2_lat1=361+1
m2_lon0=0
m2_lon1=576+1
m2_tim0=0
m2_tim1=23+1
m2_lat = m2_ds['lat'][m2_lat0:m2_lat1]
m2_lon = m2_ds['lon'][m2_lon0:m2_lon1]
m2_tim = m2_ds['time'][m2_tim0:m2_tim1]
m2_dataDayI = m2_ds['TQI'][m2_tim0:m2_tim1,m2_lat0:m2_lat1,m2_lon0:m2_lon1]
m2_dataDayL = m2_ds['TQL'][m2_tim0:m2_tim1,m2_lat0:m2_lat1,m2_lon0:m2_lon1]
m2_dataDayV = m2_ds['TQV'][m2_tim0:m2_tim1,m2_lat0:m2_lat1,m2_lon0:m2_lon1]
m2_dataDay = m2_dataDayI + m2_dataDayL + m2_dataDayV
m2_data = m2_dataDay[0,:,:].T
m2_latg,m2_long = np.meshgrid(m2_lat,m2_lon)
m2_latg_flat = m2_latg.flatten()
m2_long_flat = m2_long.flatten()
m2_data_flat = m2_data.flatten()
print([type(i) for i in [m2_latg_flat,m2_long_flat,gd.resolution(m2_dLonkm),m2_dLonkm]])
m2_indices = ps.from_latlon(m2_latg_flat,m2_long_flat,int(gd.resolution(m2_dLonkm)))
print('m2 lat.shape: ',m2_lat.shape)
print('m2 lon.shape: ',m2_lon.shape)
print('m2 tim.shape: ',m2_tim.shape)
print('m2 latg.shape: ',m2_latg.shape)
print('m2 long.shape: ',m2_long.shape)
print('m2 dataDay.shape: ',m2_dataDay.shape)
print('m2 data.shape: ',m2_data.shape)
print('m2 data flat shape:',m2_data_flat.shape)
print('m2 resolution: ',m2_dLonkm,gd.resolution(m2_dLonkm))
# Limit size for testing
cropped = True
if cropped:
# 1 box
# crop_lat=( 30.0, 31.0)
# crop_lon=(-150.0,-149.0)
# crop_lat=( 30.0, 32.0)
# crop_lon=(-164.0,-162.0)
# crop_lat=( 25.0, 30.0) # oops
# crop_lat=( 25.0, 35.0)
# crop_lon=(-160.0,-150.0)
crop_lat=( 27.0, 39.0)
crop_lon=(-161.5,-159.5)
# Good
# crop_lat=( 27.5, 32.5)
# crop_lon=(-162.5,-157.5)
m2_crop_idx = np.where(\
(m2_latg_flat > crop_lat[0]) & (m2_latg_flat < crop_lat[1]) & \
(m2_long_flat > crop_lon[0]) & (m2_long_flat < crop_lon[1]) )
m2_latg_flat = m2_latg_flat[m2_crop_idx]
m2_long_flat = m2_long_flat[m2_crop_idx]
m2_data_flat = m2_data_flat[m2_crop_idx]
m2_indices = m2_indices[m2_crop_idx]
print('m2 cropped length: ', m2_data_flat.size)
ar_threshold_kgom2 = 2.0/gd.precipitable_water_cm_per_kgom2
ar_threshold_kgom2 = 0.0
print('ar_threshold_kgom2: ',ar_threshold_kgom2)
m2_ar_idx = np.where(m2_data_flat >= ar_threshold_kgom2)
print('m2_ar size idx: ',len(m2_ar_idx[0]))
if len(m2_ar_idx[0]) == 0:
print('no m2_ar found!'); exit()
# print('m2 idx: ',m2_ar_idx)
m2_ar_data = m2_data_flat[m2_ar_idx]
m2_ar_indices = m2_indices[m2_ar_idx]
m2_ar_lat = m2_latg_flat[m2_ar_idx]
m2_ar_lon = m2_long_flat[m2_ar_idx]
print('m2_ar_lat mnmx: ',np.amin(m2_ar_lat),np.amax(m2_ar_lat))
print('m2_ar_lon mnmx: ', | np.amin(m2_ar_lon) | numpy.amin |
from typing import Tuple, Dict, Any, Union, Callable
import numpy as np
import scipy.ndimage as ndi
from common.exceptionmanager import catch_error_exception
from common.functionutil import ImagesUtil
from preprocessing.imagegenerator import ImageGenerator
_epsilon = 1e-6
class TransformRigidImages(ImageGenerator):
def __init__(self,
size_image: Union[Tuple[int, int, int], Tuple[int, int]],
is_normalize_data: bool = False,
type_normalize_data: str = 'samplewise',
is_zca_whitening: bool = False,
is_inverse_transform: bool = False,
rescale_factor: float = None,
preprocessing_function: Callable[[np.ndarray], np.ndarray] = None
) -> None:
super(TransformRigidImages, self).__init__(size_image, num_images=1)
if is_normalize_data:
if type_normalize_data == 'featurewise':
self._featurewise_center = True
self._featurewise_std_normalization = True
self._samplewise_center = False
self._samplewise_std_normalization = False
else:
# type_normalize_data == 'samplewise'
self._featurewise_center = False
self._featurewise_std_normalization = False
self._samplewise_center = True
self._samplewise_std_normalization = True
else:
self._featurewise_center = False
self._featurewise_std_normalization = False
self._samplewise_center = False
self._samplewise_std_normalization = False
self._is_zca_whitening = is_zca_whitening
self._zca_epsilon = 1e-6
self._rescale_factor = rescale_factor
self._preprocessing_function = preprocessing_function
self._mean = None
self._std = None
self._principal_components = None
self._is_inverse_transform = is_inverse_transform
self._initialize_gendata()
def update_image_data(self, in_shape_image: Tuple[int, ...]) -> None:
# self._num_images = in_shape_image[0]
pass
def _initialize_gendata(self) -> None:
self._transform_matrix = None
self._transform_params = None
self._count_trans_in_images = 0
def _update_gendata(self, **kwargs) -> None:
seed = kwargs['seed']
(self._transform_matrix, self._transform_params) = self._calc_gendata_random_transform(seed)
self._count_trans_in_images = 0
def _get_image(self, in_image: np.ndarray) -> np.ndarray:
is_type_input_image = (self._count_trans_in_images == 0)
self._count_trans_in_images += 1
return self._get_transformed_image(in_image, is_type_input_image=is_type_input_image)
def _get_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
if ImagesUtil.is_without_channels(self._size_image, in_image.shape):
in_image = np.expand_dims(in_image, axis=-1)
is_reshape_input_image = True
else:
is_reshape_input_image = False
in_image = self._calc_transformed_image(in_image, is_type_input_image=is_type_input_image)
if is_type_input_image:
in_image = self._standardize(in_image)
if is_reshape_input_image:
in_image = np.squeeze(in_image, axis=-1)
return in_image
def _get_inverse_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
if ImagesUtil.is_without_channels(self._size_image, in_image.shape):
in_image = np.expand_dims(in_image, axis=-1)
is_reshape_input_image = True
else:
is_reshape_input_image = False
if is_type_input_image:
in_image = self._standardize_inverse(in_image)
in_image = self._calc_inverse_transformed_image(in_image, is_type_input_image=is_type_input_image)
if is_reshape_input_image:
in_image = np.squeeze(in_image, axis=-1)
return in_image
def _calc_transformed_image(self, in_array: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
raise NotImplementedError
def _calc_inverse_transformed_image(self, in_array: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
raise NotImplementedError
def _calc_gendata_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
raise NotImplementedError
def _calc_gendata_inverse_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
raise NotImplementedError
def _standardize(self, in_image: np.ndarray) -> np.ndarray:
if self._preprocessing_function:
in_image = self._preprocessing_function(in_image)
if self._rescale_factor:
in_image *= self._rescale_factor
if self._samplewise_center:
in_image -= np.mean(in_image, keepdims=True)
if self._samplewise_std_normalization:
in_image /= (np.std(in_image, keepdims=True) + _epsilon)
template_message_error = 'This ImageDataGenerator specifies \'%s\', but it hasn\'t been fit on any ' \
'training data. Fit it first by calling \'fit(numpy_data)\'.'
if self._featurewise_center:
if self._mean is not None:
in_image -= self._mean
else:
message = template_message_error % ('featurewise_center')
catch_error_exception(message)
if self._featurewise_std_normalization:
if self._std is not None:
in_image /= (self._std + _epsilon)
else:
message = template_message_error % ('featurewise_std_normalization')
                catch_error_exception(message)
if self._is_zca_whitening:
if self._principal_components is not None:
flatx = np.reshape(in_image, (-1, np.prod(in_image.shape[-3:])))
whitex = np.dot(flatx, self._principal_components)
in_image = np.reshape(whitex, in_image.shape)
else:
message = template_message_error % ('zca_whitening')
catch_error_exception(message)
return in_image
def _standardize_inverse(self, in_image: np.ndarray) -> np.ndarray:
template_message_error = 'This ImageDataGenerator specifies \'%s\', but it hasn\'t been fit on any ' \
'training data. Fit it first by calling \'fit(numpy_data)\'.'
if self._is_zca_whitening:
if self._principal_components is not None:
flatx = np.reshape(in_image, (-1, np.prod(in_image.shape[-3:])))
inverse_principal_componens = np.divide(1.0, self._principal_components)
whitex = np.dot(flatx, inverse_principal_componens)
in_image = np.reshape(whitex, in_image.shape)
else:
message = template_message_error % ('zca_whitening')
catch_error_exception(message)
if self._featurewise_std_normalization:
if self._std is not None:
in_image *= self._std
else:
message = template_message_error % ('featurewise_std_normalization')
catch_error_exception(message)
if self._featurewise_center:
if self._mean is not None:
in_image += self._mean
else:
message = template_message_error % ('featurewise_center')
catch_error_exception(message)
if self._samplewise_std_normalization:
in_image *= np.std(in_image, keepdims=True)
if self._samplewise_center:
in_image += np.mean(in_image, keepdims=True)
if self._rescale_factor:
in_image /= self._rescale_factor
if self._preprocessing_function:
catch_error_exception('Not implemented inverse preprocessing function')
return in_image
@staticmethod
def _flip_axis(in_image: np.ndarray, axis: int) -> np.ndarray:
in_image = np.asarray(in_image).swapaxes(axis, 0)
in_image = in_image[::-1, ...]
in_image = in_image.swapaxes(0, axis)
return in_image
@staticmethod
def _apply_channel_shift(in_image: np.ndarray, intensity: int, channel_axis: int = 0) -> np.ndarray:
in_image = np.rollaxis(in_image, channel_axis, 0)
min_x, max_x = np.min(in_image), np.max(in_image)
channel_images = [np.clip(x_channel + intensity, min_x, max_x) for x_channel in in_image]
in_image = np.stack(channel_images, axis=0)
in_image = np.rollaxis(in_image, 0, channel_axis + 1)
return in_image
def _apply_brightness_shift(self, in_image: np.ndarray, brightness: int) -> np.ndarray:
catch_error_exception('Not implemented brightness shifting option...')
# in_image = array_to_img(in_image)
# in_image = imgenhancer_Brightness = ImageEnhance.Brightness(in_image)
# in_image = imgenhancer_Brightness.enhance(brightness)
# in_image = img_to_array(in_image)
def get_text_description(self) -> str:
raise NotImplementedError
class TransformRigidImages2D(TransformRigidImages):
_img_row_axis = 0
_img_col_axis = 1
_img_channel_axis = 2
def __init__(self,
size_image: Tuple[int, int],
is_normalize_data: bool = False,
type_normalize_data: str = 'samplewise',
is_zca_whitening: bool = False,
rotation_range: float = 0.0,
width_shift_range: float = 0.0,
height_shift_range: float = 0.0,
brightness_range: Tuple[float, float] = None,
shear_range: float = 0.0,
zoom_range: Union[float, Tuple[float, float]] = 0.0,
channel_shift_range: float = 0.0,
fill_mode: str = 'nearest',
cval: float = 0.0,
horizontal_flip: bool = False,
vertical_flip: bool = False,
rescale_factor: float = None,
preprocessing_function: Callable[[np.ndarray], np.ndarray] = None
) -> None:
self._rotation_range = rotation_range
self._width_shift_range = width_shift_range
self._height_shift_range = height_shift_range
self._brightness_range = brightness_range
self._shear_range = shear_range
self._channel_shift_range = channel_shift_range
self._fill_mode = fill_mode
self._cval = cval
self._horizontal_flip = horizontal_flip
self._vertical_flip = vertical_flip
if np.isscalar(zoom_range):
self._zoom_range = (1 - zoom_range, 1 + zoom_range)
elif len(zoom_range) == 2:
self._zoom_range = (zoom_range[0], zoom_range[1])
else:
message = '\'zoom_range\' should be a float or a tuple of two floats. Received %s' % (str(zoom_range))
catch_error_exception(message)
if self._brightness_range is not None:
if len(self._brightness_range) != 2:
message = '\'brightness_range\' should be a tuple of two floats. Received %s' % (str(brightness_range))
catch_error_exception(message)
super(TransformRigidImages2D, self).__init__(size_image,
is_normalize_data=is_normalize_data,
type_normalize_data=type_normalize_data,
is_zca_whitening=is_zca_whitening,
rescale_factor=rescale_factor,
preprocessing_function=preprocessing_function)
def _calc_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
# Apply: 1st: rigid transformations
# 2nd: channel shift intensity / flipping
if self._transform_matrix is not None:
in_image = self._apply_transform(in_image, self._transform_matrix,
channel_axis=self._img_channel_axis,
fill_mode=self._fill_mode, cval=self._cval)
if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
channel_axis=self._img_channel_axis)
if self._transform_params.get('flip_horizontal', False):
in_image = self._flip_axis(in_image, axis=self._img_col_axis)
if self._transform_params.get('flip_vertical', False):
in_image = self._flip_axis(in_image, axis=self._img_row_axis)
if is_type_input_image and (self._transform_params.get('brightness') is not None):
in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])
return in_image
def _calc_inverse_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
# Apply: 1st: channel shift intensity / flipping
# 2nd: rigid transformations
if is_type_input_image and (self._transform_params.get('brightness') is not None):
in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])
if self._transform_params.get('flip_vertical', False):
in_image = self._flip_axis(in_image, axis=self._img_row_axis)
if self._transform_params.get('flip_horizontal', False):
in_image = self._flip_axis(in_image, axis=self._img_col_axis)
if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
channel_axis=self._img_channel_axis)
if self._transform_matrix is not None:
in_image = self._apply_transform(in_image, self._transform_matrix,
channel_axis=self._img_channel_axis,
fill_mode=self._fill_mode, cval=self._cval)
return in_image
def _calc_gendata_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
# compute composition of homographies
if seed is not None:
np.random.seed(seed)
# ****************************************************
if self._rotation_range:
theta = np.deg2rad(np.random.uniform(-self._rotation_range, self._rotation_range))
else:
theta = 0
if self._height_shift_range:
tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
if np.max(self._height_shift_range) < 1:
tx *= self._size_image[self._img_row_axis]
else:
tx = 0
if self._width_shift_range:
ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
if np.max(self._width_shift_range) < 1:
ty *= self._size_image[self._img_col_axis]
else:
ty = 0
if self._shear_range:
shear = np.deg2rad(np.random.uniform(-self._shear_range, self._shear_range))
else:
shear = 0
if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 2)
flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self._vertical_flip
channel_shift_intensity = None
if self._channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)
brightness = None
if self._brightness_range is not None:
brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])
transform_parameters = {'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness}
# ****************************************************
# ****************************************************
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = self._size_image[self._img_row_axis], self._size_image[self._img_col_axis]
transform_matrix = self._transform_matrix_offset_center(transform_matrix, h, w)
# ****************************************************
return (transform_matrix, transform_parameters)
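    # Illustrative note (not in the original): the homography above is
    # accumulated by right-multiplication, so with rotation R, shift T,
    # shear S and zoom Z the combined matrix is R @ T @ S @ Z, finally
    # conjugated to act about the image centre.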
def _calc_gendata_inverse_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
# compute composition of inverse homographies
if seed is not None:
np.random.seed(seed)
# ****************************************************
if self._rotation_range:
theta = np.deg2rad(np.random.uniform(-self._rotation_range, self._rotation_range))
else:
theta = 0
if self._height_shift_range:
tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
if self._height_shift_range < 1:
tx *= self._size_image[self._img_row_axis]
else:
tx = 0
if self._width_shift_range:
ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
if self._width_shift_range < 1:
ty *= self._size_image[self._img_col_axis]
else:
ty = 0
if self._shear_range:
shear = np.deg2rad(np.random.uniform(-self._shear_range, self._shear_range))
else:
shear = 0
if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 2)
flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self._vertical_flip
channel_shift_intensity = None
if self._channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)
brightness = None
if self._brightness_range is not None:
brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])
transform_parameters = {'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness}
# ****************************************************
# ****************************************************
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), np.sin(theta), 0],
[-np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, -tx],
[0, 1, -ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, np.tan(shear), 0],
[0, 1.0 / np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[1.0 / zx, 0, 0],
[0, 1.0 / zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = self._size_image[self._img_row_axis], self._size_image[self._img_col_axis]
transform_matrix = self._transform_matrix_offset_center(transform_matrix, h, w)
# ****************************************************
return (transform_matrix, transform_parameters)
@staticmethod
def _transform_matrix_offset_center(matrix: np.ndarray, x: int, y: int) -> np.ndarray:
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
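    # Illustrative note (not in the original): offset @ matrix @ reset
    # conjugates the transform about the image centre: reset translates the
    # centre to the origin, matrix is applied there, and offset translates
    # back.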
@staticmethod
def _apply_transform(in_image: np.ndarray, transform_matrix: np.ndarray,
channel_axis: int = 0, fill_mode: str = 'nearest', cval: float = 0.0) -> np.ndarray:
in_image = np.rollaxis(in_image, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix, final_offset, order=1,
mode=fill_mode, cval=cval) for x_channel in in_image]
in_image = np.stack(channel_images, axis=0)
in_image = np.rollaxis(in_image, 0, channel_axis + 1)
return in_image
def get_text_description(self) -> str:
message = 'Rigid 2D transformations of images, with parameters...\n'
message += 'rotation (plane_XY) range: \'%s\'...\n' % (self._rotation_range)
message += 'shift (width, height) range: \'(%s, %s)\'...\n' \
% (self._width_shift_range, self._height_shift_range)
message += 'flip (horizontal, vertical): \'(%s, %s)\'...\n' \
% (self._horizontal_flip, self._vertical_flip)
message += 'zoom (min, max) range: \'(%s, %s)\'...\n' % (self._zoom_range[0], self._zoom_range[1])
message += 'shear (plane_XY) range: \'%s\'...\n' % (self._shear_range)
message += 'fill mode, when applied transformation: \'%s\'...\n' % (self._fill_mode)
return message
class TransformRigidImages3D(TransformRigidImages):
_img_dep_axis = 0
_img_row_axis = 1
_img_col_axis = 2
_img_channel_axis = 3
def __init__(self,
size_image: Tuple[int, int, int],
is_normalize_data: bool = False,
type_normalize_data: str = 'samplewise',
is_zca_whitening: bool = False,
rotation_xy_range: float = 0.0,
rotation_xz_range: float = 0.0,
rotation_yz_range: float = 0.0,
width_shift_range: float = 0.0,
height_shift_range: float = 0.0,
depth_shift_range: float = 0.0,
brightness_range: Tuple[float, float] = None,
shear_xy_range: float = 0.0,
shear_xz_range: float = 0.0,
shear_yz_range: float = 0.0,
zoom_range: Union[float, Tuple[float, float]] = 0.0,
channel_shift_range: float = 0.0,
fill_mode: str = 'nearest',
cval: float = 0.0,
horizontal_flip: bool = False,
vertical_flip: bool = False,
axialdir_flip: bool = False,
rescale_factor: float = None,
preprocessing_function: Callable[[np.ndarray], np.ndarray] = None
) -> None:
self._rotation_xy_range = rotation_xy_range
self._rotation_xz_range = rotation_xz_range
self._rotation_yz_range = rotation_yz_range
self._width_shift_range = width_shift_range
self._height_shift_range = height_shift_range
self._depth_shift_range = depth_shift_range
self._brightness_range = brightness_range
self._shear_xy_range = shear_xy_range
self._shear_xz_range = shear_xz_range
self._shear_yz_range = shear_yz_range
self._channel_shift_range = channel_shift_range
self._fill_mode = fill_mode
self._cval = cval
self._horizontal_flip = horizontal_flip
self._vertical_flip = vertical_flip
self._axialdir_flip = axialdir_flip
if np.isscalar(zoom_range):
self._zoom_range = (1 - zoom_range, 1 + zoom_range)
elif len(zoom_range) == 2:
self._zoom_range = (zoom_range[0], zoom_range[1])
else:
message = '\'zoom_range\' should be a float or a tuple of two floats. Received %s' % (str(zoom_range))
catch_error_exception(message)
if self._brightness_range is not None:
if len(self._brightness_range) != 2:
message = '\'brightness_range\' should be a tuple of two floats. Received %s' % (str(brightness_range))
catch_error_exception(message)
super(TransformRigidImages3D, self).__init__(size_image,
is_normalize_data=is_normalize_data,
type_normalize_data=type_normalize_data,
is_zca_whitening=is_zca_whitening,
rescale_factor=rescale_factor,
preprocessing_function=preprocessing_function)
def _calc_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
# Apply: 1st: rigid transformations
# 2nd: channel shift intensity / flipping
if self._transform_matrix is not None:
in_image = self._apply_transform(in_image, self._transform_matrix,
channel_axis=self._img_channel_axis,
fill_mode=self._fill_mode, cval=self._cval)
if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
channel_axis=self._img_channel_axis)
if self._transform_params.get('flip_horizontal', False):
in_image = self._flip_axis(in_image, axis=self._img_col_axis)
if self._transform_params.get('flip_vertical', False):
in_image = self._flip_axis(in_image, axis=self._img_row_axis)
if self._transform_params.get('flip_axialdir', False):
in_image = self._flip_axis(in_image, axis=self._img_dep_axis)
if is_type_input_image and (self._transform_params.get('brightness') is not None):
in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])
return in_image
def _calc_inverse_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
# Apply: 1st: channel shift intensity / flipping
# 2nd: rigid transformations
if is_type_input_image and (self._transform_params.get('brightness') is not None):
in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])
if self._transform_params.get('flip_axialdir', False):
in_image = self._flip_axis(in_image, axis=self._img_dep_axis)
if self._transform_params.get('flip_vertical', False):
in_image = self._flip_axis(in_image, axis=self._img_row_axis)
if self._transform_params.get('flip_horizontal', False):
in_image = self._flip_axis(in_image, axis=self._img_col_axis)
if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
channel_axis=self._img_channel_axis)
if self._transform_matrix is not None:
in_image = self._apply_transform(in_image, self._transform_matrix,
channel_axis=self._img_channel_axis,
fill_mode=self._fill_mode, cval=self._cval)
return in_image
def _calc_gendata_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
# compute composition of homographies
if seed is not None:
np.random.seed(seed)
# ****************************************************
if self._rotation_xy_range:
angle_xy = np.deg2rad(np.random.uniform(-self._rotation_xy_range, self._rotation_xy_range))
else:
angle_xy = 0
if self._rotation_xz_range:
angle_xz = np.deg2rad(np.random.uniform(-self._rotation_xz_range, self._rotation_xz_range))
else:
angle_xz = 0
if self._rotation_yz_range:
angle_yz = np.deg2rad(np.random.uniform(-self._rotation_yz_range, self._rotation_yz_range))
else:
angle_yz = 0
if self._height_shift_range:
tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
if self._height_shift_range < 1:
tx *= self._size_image[self._img_row_axis]
else:
tx = 0
if self._width_shift_range:
ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
if self._width_shift_range < 1:
ty *= self._size_image[self._img_col_axis]
else:
ty = 0
if self._depth_shift_range:
tz = np.random.uniform(-self._depth_shift_range, self._depth_shift_range)
if self._depth_shift_range < 1:
tz *= self._size_image[self._img_dep_axis]
else:
tz = 0
if self._shear_xy_range:
shear_xy = np.deg2rad(np.random.uniform(-self._shear_xy_range, self._shear_xy_range))
else:
shear_xy = 0
if self._shear_xz_range:
shear_xz = np.deg2rad(np.random.uniform(-self._shear_xz_range, self._shear_xz_range))
else:
shear_xz = 0
if self._shear_yz_range:
shear_yz = np.deg2rad(np.random.uniform(-self._shear_yz_range, self._shear_yz_range))
else:
shear_yz = 0
if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
(zx, zy, zz) = (1, 1, 1)
else:
(zx, zy, zz) = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 3)
flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self._vertical_flip
flip_axialdir = (np.random.random() < 0.5) * self._axialdir_flip
channel_shift_intensity = None
if self._channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)
brightness = None
if self._brightness_range is not None:
brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])
transform_parameters = {'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'flip_axialdir': flip_axialdir,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness}
# ****************************************************
# ****************************************************
transform_matrix = None
if angle_xy != 0:
rotation_matrix = np.array([[1, 0, 0, 0],
[0, np.cos(angle_xy), -np.sin(angle_xy), 0],
[0, np.sin(angle_xy), np.cos(angle_xy), 0],
[0, 0, 0, 1]])
transform_matrix = rotation_matrix
if angle_xz != 0:
rotation_matrix = np.array([[np.cos(angle_xz), np.sin(angle_xz), 0, 0],
[-np.sin(angle_xz), np.cos(angle_xz), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
transform_matrix = \
rotation_matrix if transform_matrix is None else np.dot(transform_matrix, rotation_matrix)
if angle_yz != 0:
rotation_matrix = np.array([[np.cos(angle_yz), 0, np.sin(angle_yz), 0],
[0, 1, 0, 0],
[-np.sin(angle_yz), 0, np.cos(angle_yz), 0],
[0, 0, 0, 1]])
transform_matrix = \
rotation_matrix if transform_matrix is None else np.dot(transform_matrix, rotation_matrix)
if tx != 0 or ty != 0 or tz != 0:
shift_matrix = np.array([[1, 0, 0, tz],
[0, 1, 0, tx],
[0, 0, 1, ty],
[0, 0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear_xy != 0:
shear_matrix = np.array([[1, 0, 0, 0],
[0, 1, -np.sin(shear_xy), 0],
[0, 0, | np.cos(shear_xy) | numpy.cos |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# https://github.com/yiskw713/asrf/libs/postprocess.py
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
import math
class GaussianSmoothing(nn.Layer):
"""
Apply gaussian smoothing on a 1d tensor.
    Filtering is performed separately for each channel
    in the input using a depthwise convolution; the number of channels is
    inferred from the input at forward time.
    Arguments:
        kernel_size (int): Size of the gaussian kernel.
        sigma (float): Standard deviation of the gaussian kernel.
    """
def __init__(self, kernel_size=15, sigma=1.0):
super().__init__()
self.kernel_size = kernel_size
# The gaussian kernel is the product of the
# gaussian function of each dimension.
kernel = 1
meshgrid = paddle.arange(kernel_size)
meshgrid = paddle.cast(meshgrid, dtype='float32')
mean = (kernel_size - 1) / 2
kernel = kernel / (sigma * math.sqrt(2 * math.pi))
kernel = kernel * paddle.exp(-(((meshgrid - mean) / sigma)**2) / 2)
# Make sure sum of values in gaussian kernel equals 1.
# kernel = kernel / paddle.max(kernel)
self.kernel = paddle.reshape(kernel, [1, 1, -1])
def forward(self, inputs):
"""
Apply gaussian filter to input.
Arguments:
input (paddle.Tensor): Input to apply gaussian filter on.
Returns:
filtered (paddle.Tensor): Filtered output.
"""
_, c, _ = inputs.shape
inputs = F.pad(inputs,
pad=((self.kernel_size - 1) // 2,
(self.kernel_size - 1) // 2),
mode="reflect",
data_format='NCL')
kernel = paddle.expand(self.kernel, shape=[c, 1, self.kernel_size])
return F.conv1d(inputs, weight=kernel, groups=c)
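# Hedged usage sketch (not part of the original file; shapes are assumed):
# smoother = GaussianSmoothing(kernel_size=15, sigma=1.0)
# prob = paddle.rand([1, 1, 100])      # (N, C, T) boundary probabilities
# smoothed = smoother(prob)            # reflect-padded, same (N, C, T) shape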
def argrelmax(prob, threshold=0.7):
"""
Calculate arguments of relative maxima.
    prob: np.array. boundary probability maps distributed in [0, 1]
prob shape is (T)
ignore the peak whose value is under threshold
Return:
Index of peaks for each batch
"""
# ignore the values under threshold
prob[prob < threshold] = 0.0
# calculate the relative maxima of boundary maps
# treat the first frame as boundary
peak = np.concatenate(
[
            np.ones((1), dtype=bool),
            (prob[:-2] < prob[1:-1]) & (prob[2:] < prob[1:-1]),
            np.zeros((1), dtype=bool),
],
axis=0,
)
peak_idx = np.where(peak)[0].tolist()
return peak_idx
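# Worked example (illustrative): argrelmax(np.array([0.1, 0.9, 0.2, 0.8, 0.75]))
# zeroes the entries below 0.7 (note: the input array is modified in place),
# always treats frame 0 as a boundary, and returns [0, 1, 3] because 0.9 and
# 0.8 are relative maxima that survive the threshold.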
def is_probability(x):
assert x.ndim == 3
if x.shape[1] == 1:
# sigmoid
if x.min() >= 0 and x.max() <= 1:
return True
else:
return False
else:
# softmax
_sum = np.sum(x, axis=1).astype(np.float32)
_ones = np.ones_like(_sum, dtype=np.float32)
return np.allclose(_sum, _ones)
def convert2probability(x):
"""
Args: x (N, C, T)
"""
assert x.ndim == 3
if is_probability(x):
return x
else:
if x.shape[1] == 1:
# sigmoid
prob = 1 / (1 + np.exp(-x))
else:
# softmax
prob = np.exp(x) / np.sum( | np.exp(x) | numpy.exp |
import math
import numpy as np
import scipy
import torch
from torch.autograd import Variable
import support.kernels as kernel_factory
from support.utilities.general_settings import Settings
from in_out.image_functions import points_to_voxels_transform, metric_to_image_radial_length
def create_regular_grid_of_points(box, spacing):
"""
Creates a regular grid of 2D or 3D points, as a numpy array of size nb_of_points x dimension.
box: (dimension, 2)
"""
dimension = Settings().dimension
axis = []
for d in range(dimension):
min = box[d, 0]
max = box[d, 1]
length = max - min
assert (length > 0)
offset = 0.5 * (length - spacing * math.floor(length / spacing))
axis.append(np.arange(min + offset, max + 1e-10, spacing))
if dimension == 1:
control_points = np.zeros((len(axis[0]), dimension))
control_points[:, 0] = axis[0].flatten()
elif dimension == 2:
x_axis, y_axis = | np.meshgrid(axis[0], axis[1]) | numpy.meshgrid |
"""
Module containing functions for transforming data
to be put into deep learning model
"""
import numpy as np
from fastai.vision import Image
from torch import FloatTensor
def fastai_image(img):
"""Turns numpy array into fastai Image object"""
img = FloatTensor(img)
img = img.permute(2, 0, 1)
return Image(img)
def make_3channel(img):
"""Ensures input image is a 3 channel image for training/predicting"""
img = np.reshape(img, (img.shape[0], img.shape[1], 1))
img = | np.concatenate((img, img, img), axis=2) | numpy.concatenate |
import numpy as np
import theano
import theano.tensor as T
from sfo.sfo import SFO
from skimage.filters import gabor_kernel
from skimage.transform import resize
def generate_data(ndim, nsamples, nfeatures):
"""Generate data by drawing samples that are a sparse combination of gabor features
"""
# build features
features = list()
for j in range(nfeatures):
theta = np.pi * np.random.rand()
sigma = 2*np.random.rand() + 1
freq = 0.2 * | np.random.rand() | numpy.random.rand |
import numpy as np
from scipy.optimize import curve_fit
from sklearn.decomposition import PCA
from sklearn.linear_model import RidgeClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.model_selection import KFold
class NestedXval():
'''A generator for nested cross-validation that ensures that there is the
same number of trials for each class in training.
It is necessary to have the same number of trials in each category to
vectorize the training of the decoder so that the training of all 6
decoders (in one vs one scheme) is done simultaneously.
'''
def __init__(self, n_outer_splits=None):
'''Nested crossvalidation to get the same number of trials in each class for training'''
self.nouter = n_outer_splits
self.ninner = 2
self.outerxval = KFold(n_splits=n_outer_splits)
def split(self, targets):
'''Returns a generator that splits data in test, train and subspace
with the same number of trials in each category
'''
labels, counts = np.unique(targets, return_counts=True)
nclasses = len(labels)
if not np.all(counts[0] == counts) and max(counts) - min(counts) > 1:
            raise ValueError("The number of trials in each class is not consistent")
interleaved_outer = np.concatenate(list(zip(*[np.where(targets == label)[0] for label in labels])))
leftovers = []
for iclass in np.where(np.min(counts) < counts)[0]:
leftovers.append(np.where(targets == labels[iclass])[0][-1])
interleaved_outer = np.concatenate((interleaved_outer, np.array(leftovers))).astype(int)
targets_ = targets[interleaved_outer]
outersplit = self.outerxval.split(targets)
for ioutsplit in range(self.nouter):
restinds, testinds = next(outersplit)
ntrain_per_class = np.ceil(len(restinds) / 2 / nclasses).astype(int)
inner_inds_by_class = [np.where(targets_[restinds] == label)[0] for label in labels]
traininds = np.concatenate(list(zip(*[restinds[classinds[:ntrain_per_class]] for classinds in inner_inds_by_class])))
subinds = np.concatenate([restinds[classinds[ntrain_per_class:]] for classinds in inner_inds_by_class])
testinds = interleaved_outer[testinds]
traininds = interleaved_outer[traininds]
subinds = interleaved_outer[subinds]
yield np.sort(testinds), np.sort(traininds), np.sort(subinds)
traininds = np.concatenate(list(zip(*[restinds[classinds[:-ntrain_per_class:-1]] for classinds in inner_inds_by_class])))
subinds = np.concatenate([restinds[classinds[-ntrain_per_class::-1]] for classinds in inner_inds_by_class])
testinds = interleaved_outer[testinds]
traininds = interleaved_outer[traininds]
subinds = interleaved_outer[subinds]
yield np.sort(testinds), np.sort(traininds), np.sort(subinds)
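# Hedged usage sketch (assumed, not part of the original module):
# y = np.tile(np.arange(4), 10)  # 4 classes, 10 trials each, interleaved
# for testind, trainind, subind in NestedXval(n_outer_splits=5).split(y):
#     ...  # fit on trainind, build the subspace on subind, score on testind
# Each outer fold is yielded twice, once per inner split, giving 10 folds.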
def sub_split(targets, trainind):
'''Cross-validation generator for the decoder and subspace trials
    Function to split training trials into training and subspace trials, ensuring
that there is the same number of trials in each class for training.
Parameters
----------
targets : np.array - The targets (or y values)
trainind : np.array - The indices of the training trials
Returns
-------
Generator for each fold. Yields a tuple of np.array, one array for the
training trials and one array for the subspace
'''
targets = targets[trainind]
labels = np.unique(targets)
nclasses = len(labels)
ntrain_per_class = np.ceil(len(targets) / 2 / nclasses).astype(int)
inner_inds_by_class = [np.where(targets == label)[0] for label in labels]
ridgeind = np.concatenate(list(zip(*[classinds[:ntrain_per_class] for classinds in inner_inds_by_class])))
subind = np.concatenate([classinds[ntrain_per_class:] for classinds in inner_inds_by_class])
yield np.sort(trainind[ridgeind]), np.sort(trainind[subind])
ridgeind = np.concatenate(list(zip(*[classinds[:-ntrain_per_class:-1] for classinds in inner_inds_by_class])))
subind = np.concatenate([classinds[-ntrain_per_class::-1] for classinds in inner_inds_by_class])
yield np.sort(trainind[ridgeind]), np.sort(trainind[subind])
def combine_xval_folds(acc_fold):
'''Combine CTD cross-validation accuracies by averaging them
Parameters
----------
acc_fold : list of np.array<bins * bins> - The CTD accuracy matrices of all
the cross-validation folds
Returns
-------
np.array<bins * bins> - The averaged CTD accuracy
'''
return np.stack(acc_fold).mean(0)
def get_acc_mean(acc):
'''Averages all accuracy points in a CTD accuracy matrix
Parameters
----------
acc : np.array<bins * bins> - The CTD accuracy matrix
Returns
-------
float - The stability score
'''
return acc.mean()
def gaussian_func(x, mu, sigma, a):
'''A gaussian function
Parameters
----------
x : np.array - the x values to feed the function
mu : float - the mean of the gaussian
sigma : float - the standard deviation of the gaussian
a : float - a scaling coefficient
Returns
-------
The transformed values in a np.array for each value in x.
'''
b = .25
return a * np.exp(-(x-mu)**2/(2*sigma**2)) + b
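# Note (illustrative, not from the original): the fixed baseline b = .25 is
# chance level for four classes, so gaussian_func peaks at a + .25 when
# x == mu and decays back towards .25 far from the peak.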
def get_CT_score(acc, bounds, dstraining=None):
'''Get the "locality" score from a CTD accuracy
Fits a gaussian on each vector formed by training at a time bin and testing
at all time bins. Calculates the ratio between the maximum of the gaussian
divided by its standard deviation. Then averages all the ratios to get a
locality score.
Parameters
----------
acc : np.array<bins * bins> - The accuracy matrix of a CTD
bounds : a 2-element tuple of 3-element np.array - the bounds for
gaussian fitting for the locality score, e.g.
# mu sigma a
np.array([0, 2, 0]), # lower bounds
np.array([0, np.inf, 1])) # upper bounds
dstraining : int - if the CTD was trained on a subset of the time bins.
Every 'dstraining' bins have been selected for a down sampled training.
Returns
-------
Locality score
'''
if dstraining is None:
dstraining = 1
opted = []
nbinstrain, nbinstest = acc.shape
x = np.arange(nbinstest)
scores = np.empty(nbinstrain)
for ibintrain in range(nbinstrain):
data = acc[ibintrain, :]
data[data < .25] = .25
ibintest = dstraining - 1 + ibintrain * dstraining
params0 = [ibintest, 10, .5]
bounds[0][0] = np.max((ibintest - 5, 0))
bounds[1][0] = np.min((ibintest + 5, nbinstest))
try:
optparams = curve_fit(gaussian_func, x, data, params0, bounds=bounds)[0]
except RuntimeError:
optparams = [0, 0, 0]
scores[ibintrain] = 0
else:
max_val = np.max(gaussian_func(x, *optparams))
scores[ibintrain] = (max_val - .25) / optparams[1] * 1000
opted.append(optparams)
return np.mean(scores)
###############################################################################
################################## VECTORIZED #################################
###############################################################################
def vectorized_xval_CTD(X, y, population=None, permseed=None, subspace=True,
alpha=1, mask=None, dstraining=None):
'''Cross-validation of vectorized cross-temporal decoding
Cross-validation using a custom generator to ensure that the number of
trials in each class is identical.
Parameters
----------
X : np.array<trials * bins * neurons> - data
y : np.array<ntrials> - targets
population : np.array of int - the indices of the neurons included
permseed : int - a seed for permutation testing
subspace : bool - whether to use a subspace or not
alpha : float - the ridge parameter
mask : np.array<nbins> of bool - which bins to take to build the subspace
dstraining : int - Every 'dstraining' bins will be selected for a down
sampled training
Returns
-------
accuracy : np.array<bins * bins> - the CTD accuracy averaged across folds
of the cross-validation
'''
acc_fold = []
if subspace:
nestedxval = NestedXval(n_outer_splits=5)
for testind, trainind, subind in nestedxval.split(y):
acc_fold.append(vectorized_CTD_job(X, y, trainind, testind, subind=subind, population=population,
permseed=permseed, alpha=alpha, mask=mask, dstraining=dstraining)[0])
else:
kfold = KFold(n_splits=5)
for trainind, testind in kfold.split(y):
acc_fold.append(vectorized_CTD_job(X, y, trainind, testind, population=population, permseed=permseed,
alpha=alpha, mask=mask, dstraining=dstraining)[0])
accuracy = combine_xval_folds(acc_fold)
return accuracy
def vectorized_sub_split(X, y, restind, testind,
population=None, permseed=None, subspace=True, alpha=1, mask=None):
'''Cross-validation of vectorized cross-temporal decoding (for testing)
Cross-validation of CTD with pre-defined testing trials. Used for testing,
when the testing trials have already been set aside and only the remaining
trials must be split into training and subspace trials
Parameters
----------
X : np.array<trials * bins * neurons> - data
y : np.array<ntrials> - targets
restind : np.array - The indices of all the trials except the testing trials
testind : np.array - The indices of the testing trials
population : np.array of int - the indices of the neurons included
permseed : int - a seed for permutation testing
subspace : bool - whether to use a subspace or not
alpha : float - the ridge parameter
mask : np.array<nbins> of bool - which bins to take to build the subspace
Returns
-------
accuracy : np.array<bins * bins> - the CTD accuracy averaged across folds
of the cross-validation
'''
if subspace:
acc_split = []
for ridgeind, subind in sub_split(y, restind):
acc_split.append(vectorized_CTD_job(X, y, ridgeind, testind, subind=subind,
population=population, permseed=permseed, alpha=alpha, mask=mask)[0])
accuracy = combine_xval_folds(acc_split)
else:
accuracy = vectorized_CTD_job(X, y, restind, testind,
population=population, permseed=permseed, alpha=alpha, mask=mask)[0]
return accuracy
def vectorized_CTD_job(X, y, trainind, testind,
population=None, **kwargs):
'''Calling vectorized cross-temporal decoding with a given ensemble
Parameters
----------
X : np.array<trials * bins * neurons> - data
y : np.array<ntrials> - targets
trainind : np.array - The indices of the training trials
testind : np.array - The indices of the testing trials
population : np.array of int - the indices of the neurons included
**kwargs : keyword arguments for function 'vectorized_CTD'
Returns
-------
accuracy : np.array<bins * bins> - The CTD matrix accuracy
testout : np.array<bins * bins * test trials> - The output of the
classifier for each pair of train and test bins, for each trial
'''
if population is None:
population = np.arange(X.shape[-1])
newX = X[..., population]
return vectorized_CTD(newX, y, trainind, testind, **kwargs)
def vectorized_CTD(X, y, trainind, testind,
alpha=1, subind=None, mask=None, permseed=None, dstraining=None):
'''Vectorized cross-temporal decoding
This is a vectorized version of the cross-temporal decoding algorithm. The
six decoders (in a one vs one scheme) are trained simultaneously thanks to
vectorization which considerably speeds up computations. Unfortunately it
makes the code less readable. The decoding algorithm was inspired by scikit
learn's implementation of ridge regression. Note that to be able to
vectorize training and testing, each class must have the same number of
training and testing trials.
Parameters
----------
X : np.array<trials * bins * neurons> - data
y : np.array<ntrials> - targets
trainind : np.array
The indices of the training trials
testind : np.array
The indices of the testing trials
alpha : float - the ridge parameter
subind : np.array
The indices of trials used to define the subspace. If not None, a
subspace will be defined
mask : np.array<nbins> of bool - which bins to take to build the subspace
permseed : int - a seed for permutation testing, only the training trials
are shuffled
dstraining : int
Every 'dstraining' bins will be selected for a down sampled training
Returns
-------
accuracy : np.array<bins * bins> - The CTD matrix accuracy
testout : np.array<bins * bins * test trials> - The output of the
classifier for each pair of train and test bins, for each trial
'''
subspace = bool(subind is not None)
if dstraining is None:
dstraining = 1
ntrials, nbins, _ = X.shape
if mask is None:
mask = range(nbins)
labels = np.unique(y)
nclasses = len(labels)
Xtrain, Xtest = X[trainind], X[testind]
ytrain, ytest = y[trainind], y[testind]
if permseed is not None:
np.random.seed(permseed)
np.random.shuffle(ytrain)
if dstraining is not None:
Xtrain = Xtrain[:, dstraining-1::dstraining]
nbinstrain = Xtrain.shape[1]
else:
nbinstrain = nbins
Xtrain = Xtrain.transpose((1, 0, 2))
if subspace:
ysub = y[subind]
Xsub = X[:, mask][subind].mean(1) # Averaging over time bins
Xsub = np.stack([Xsub[ysub == label].mean(0) for label in labels])
subspace = PCA()
subspace.fit(Xsub)
Xtrain = (Xtrain - subspace.mean_[None, None, :]) @ subspace.components_.T[None, ...]
# We need to have the exact same number of trials for each class
_, traincounts = np.unique(ytrain, return_counts=True)
mintrials = np.min(traincounts)
if not np.all(traincounts[0] == traincounts):
mintrials = np.min(traincounts)
keptind = []
for iclass, count in enumerate(traincounts):
if count > mintrials:
inds = np.where(ytrain == labels[iclass])[0][:-(count-mintrials)]
else:
inds = np.where(ytrain == labels[iclass])[0]
keptind.append(inds)
keptind = np.concatenate(keptind)
else:
keptind = np.arange(len(ytrain))
ytrain_cut = ytrain[keptind]
Xtrain_cut = Xtrain[:, keptind]
nestimators = (nclasses * (nclasses - 1)) // 2
nsamples = mintrials * 2
nfeatures = Xtrain_cut.shape[-1]
ytrain_ = np.empty((nestimators, nsamples))
Xtrain_ = np.empty((nbinstrain, nestimators, nsamples, nfeatures))
k = 0
for c1 in range(nclasses):
for c2 in range(c1+1, nclasses):
cond = np.logical_or(ytrain_cut == c1, ytrain_cut == c2)
ytrain_[k, ytrain_cut[cond] == c1] = -1
ytrain_[k, ytrain_cut[cond] == c2] = 1
Xtrain_[:, k] = Xtrain_cut[:, cond]
k += 1
X_offset = Xtrain_.mean(2, keepdims=True)
Xtrain_ -= X_offset
if nfeatures > nsamples:
XXT = Xtrain_ @ Xtrain_.transpose((0, 1, 3, 2))
XXT = XXT + np.eye(XXT.shape[-1])[None, None, ...] * alpha
dual_coef = np.linalg.solve(XXT, ytrain_.reshape(1, nestimators, -1))
coefs = Xtrain_.transpose((0, 1, 3, 2)) @ dual_coef.reshape(dual_coef.shape[0], nestimators, -1, 1)
else:
XTX = Xtrain_.transpose((0, 1, 3, 2)) @ Xtrain_
Xy = Xtrain_.transpose((0, 1, 3, 2)) @ ytrain_.reshape((1, ytrain_.shape[0], -1, 1))
XTX = XTX + np.eye(XTX.shape[-1])[None, None, ...] * alpha
coefs = np.linalg.solve(XTX, Xy)
intercepts = - X_offset @ coefs
Xtest_ = Xtest.reshape(Xtest.shape[0] * Xtest.shape[1], Xtest.shape[2])
if subspace:
Xtest_ = subspace.transform(Xtest_)
scores = (Xtest_ @ coefs) + intercepts
scores = scores.reshape(scores.shape[:-1])
    predictions = (scores > 0).astype(int)
nsamples = predictions.shape[-1]
predsT = predictions.transpose((0, 2, 1))
scoresT = scores.transpose((0, 2, 1))
votes = np.zeros((nbinstrain, nsamples, nclasses))
sum_of_confidences = np.zeros((nbinstrain, nsamples, nclasses))
k = 0
for i in range(nclasses):
for j in range(i + 1, nclasses):
sum_of_confidences[:, :, i] -= scoresT[:, :, k]
sum_of_confidences[:, :, j] += scoresT[:, :, k]
votes[predsT[:, :, k] == 0, i] += 1
votes[predsT[:, :, k] == 1, j] += 1
k += 1
transformed_confidences = (sum_of_confidences /
(3 * (np.abs(sum_of_confidences) + 1)))
preds = np.argmax(votes + transformed_confidences, 2)
preds = preds.reshape(nbinstrain, len(testind), nbins)
accuracy = (preds == ytest[None, :, None]).mean(1)
return accuracy, preds
def ensemble_mean_acc_vec(X, y, **kwargs):
'''Get stable score for a given ensemble
Parameters
----------
X : np.array<trials * bins * neurons> - data
y : np.array<trials> - targets
population : np.array of int - the indices of the neurons included
alpha : float - the ridge parameter
subspace : bool - whether to use a subspace or not
Returns
-------
float - Stable score
'''
accuracy = vectorized_xval_CTD(X, y, **kwargs)
perf = get_acc_mean(accuracy)
return perf
def ensemble_dynamic_acc_vec(X, y, bounds, dstraining=None, **kwargs):
'''Get "locality" or dynamic score for a given ensemble
Parameters
----------
X : np.array<trials * bins * neurons> - data
y : np.array<trials> - targets
bounds : a 2-element tuple of 3-element np.array - the bounds
# mu sigma a
np.array([0, 2, 0]), # lower bounds
np.array([0, np.inf, 1])) # upper bounds
population : np.array of int - the indices of the neurons included
alpha : float - the ridge parameter
subspace : bool - whether to use a subspace or not
Returns
-------
    float - Dynamic (locality) score
'''
if 'subspace' in kwargs and kwargs['subspace']:
raise ValueError("There is no subspace with dynamic ensembles")
accuracy = vectorized_xval_CTD(X, y, subspace=False, dstraining=dstraining, **kwargs)
score = get_CT_score(accuracy, bounds, dstraining=dstraining)
return score
def vectorized_xval_subset(X, y, trialinds=None, **kwargs):
'''Calling vectorized cross-validation of CTD with a subset of trials
Used to get the ridge parameters from the training trials only.
Parameters
----------
X : np.array<trials * bins * neurons> - data
y : np.array<trials> - targets
trialinds : np.array of int - The indices of the included trials
**kwargs : see arguments for vectorized_xval_CTD
Returns
-------
np.array<bins * bins> - the CTD accuracy averaged across folds
of the cross-validation
'''
if trialinds is not None:
X = X[trialinds]
y = y[trialinds]
return vectorized_xval_CTD(X, y, **kwargs)
def get_ridge_param_CTD_vec(data, alpha_powers, subspace, perm_seed=None,
trialinds=None, client=None):
'''Get the ridge parameter for CTD
Parameters
----------
data : a tuple of arguments - It contains the following in that order
X : np.array<trials * bins * neurons> - data
y : np.array<trials> - targets
mask : np.array<bins> of bool - which bins to take to build the subspace
population : np.array of int - the indices of the neurons included
alpha_powers : np.array - the powers of ten of alpha values that will be
tested
subspace : bool - Whether to use a subspace for CTD
perm_seed : int - A permutation seed to shuffle labels
trialinds : np.array of int - The indices of the included trials
client : dask client - To perform the parameter exploration in parallel
Returns
-------
float or dask future (if parallel) - The ridge parameter that yields the
best decoding accuracy
'''
X, y, delaymask, pop = data
acc_alpha = []
for ap in alpha_powers:
alpha = 10.**ap
if client:
acc_alpha.append(client.submit(vectorized_xval_subset, X, y,
population=pop, permseed=perm_seed, subspace=subspace,
alpha=alpha, mask=delaymask, trialinds=trialinds))
else:
acc_alpha.append(vectorized_xval_subset(X, y, population=pop, permseed=perm_seed,
subspace=subspace, alpha=alpha, mask=delaymask,
trialinds=trialinds))
def get_mean(acc):
return np.mean(acc[delaymask][:, delaymask])
    if client:
        acc_alpha = client.map(get_mean, acc_alpha)
    else:
        acc_alpha = [get_mean(acc) for acc in acc_alpha]
def best_alpha(acc_alpha):
return 10. ** alpha_powers[np.argmax(acc_alpha)]
if client:
alpha = client.submit(best_alpha, acc_alpha)
else:
alpha = best_alpha(acc_alpha)
return alpha
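# --- Illustrative sketch (not from the original module) ---
# get_ridge_param_CTD_vec fans one cross-validation out per candidate alpha
# and keeps the best. The same submit/map/argmax pattern, with the standard
# library's ThreadPoolExecutor standing in for the dask client and a mock
# accuracy function standing in for vectorized_xval_subset:
def _demo_alpha_grid_search():
    from concurrent.futures import ThreadPoolExecutor
    import numpy as np
    def mock_cv_accuracy(alpha):
        return -abs(np.log10(alpha) - 2.0)       # pretend accuracy peaks at 100
    alpha_powers = np.arange(-2., 6.)
    with ThreadPoolExecutor() as ex:
        accs = list(ex.map(mock_cv_accuracy, 10. ** alpha_powers))
    return 10. ** alpha_powers[int(np.argmax(accs))]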
def train_test_CTD(data, params, stable, dynparams=None, client=None):
'''Generic function to test stable or dynamic ensembles
Parameters
----------
data : 2-element tuple containing (or their equivalent dask future if parallelized)
X : np.array<trials * bins * neurons*> - The firing rate activity for all
bins and trials
y : np.array<trials> - The identity of each trial
params : 3-element tuple containing
pop : np.array of int - the indices of the neurons included
alpha : float - the ridge parameter
subspace : bool - whether to use a subspace
stable : bool - whether to test for a stable (True) or dynamic (False)
ensemble
dynparams : 2-element tuple containing (ignored if stable is True)
bounds : a 2-element tuple of 3-element np.array - the bounds for
gaussian fitting for the locality score, e.g.
# mu sigma a
np.array([0, 2, 0]), # lower bounds
np.array([0, np.inf, 1])) # upper bounds
dstraining : int - Every 'dstraining' bins will be selected for a down
sampled training
client : dask client - A dask client to perform the computations in parallel
'''
    parallel = client is not None
Xfold, yfold = data
pop, alpha, subspace = params
if not stable:
bounds, dstraining = dynparams
if stable:
if parallel:
perf = client.submit(ensemble_mean_acc_vec, Xfold, yfold,
population=pop, alpha=alpha, subspace=subspace)
else:
perf = ensemble_mean_acc_vec(Xfold, yfold,
population=pop, alpha=alpha, subspace=subspace)
else:
if parallel:
perf = client.submit(ensemble_dynamic_acc_vec, Xfold, yfold, bounds,
population=pop, alpha=alpha, dstraining=dstraining)
else:
perf = ensemble_dynamic_acc_vec(Xfold, yfold, bounds,
population=pop, alpha=alpha,
dstraining=dstraining)
return perf
###############################################################################
################################ NON VECTORIZED ###############################
###############################################################################
# Non vectorized versions of the above functions
def decoding_subspace_xval_ridge(X, y, trainind, testind, alpha=1,
subspace=True, mask=None):
'''Decoding with cross-validation in subspace
Parameters
----------
    X : np.array<trials * bins * neurons> - data
    y : np.array<trials> - targets
    trainind : np.array
        The indices of the training trials
    testind : np.array
        The indices of the testing trials
    Returns
    -------
    correct : np.array<nbins>
        The mean decoding accuracy for each bin
    testout : np.array<ntesttrials * nbins>
        The output of the classifier for each test trial and bin
'''
sub_train_ratio = .5
nbins = X.shape[1]
labels = np.unique(y)
testout = np.empty((len(testind), nbins))
correct = np.empty(nbins)
if mask is None:
mask = range(nbins)
### Split subspace and training
if subspace:
nsubtrials = int(len(trainind)*sub_train_ratio)
subind, trainind = trainind[:nsubtrials], trainind[nsubtrials:]
ysub = y[subind]
Xsub = X[:, mask][subind].mean(1) # Averaging over time bins
Xsub = np.stack([Xsub[ysub == label].mean(0) for label in labels])
subspace = PCA()
subspace.fit(Xsub)
Xtrain, Xtest = X[trainind], X[testind]
ytrain, ytest = y[trainind], y[testind]
for ibin in range(nbins):
if subspace:
Xbintrain = subspace.transform(Xtrain[:, ibin])
else:
Xbintrain = Xtrain[:, ibin]
model = OneVsOneClassifier(RidgeClassifier(alpha=alpha, solver='cholesky'))
model.fit(Xbintrain, ytrain)
if subspace:
Xbintest = subspace.transform(Xtest[:, ibin])
else:
Xbintest = Xtest[:, ibin]
out = model.predict(Xbintest)
testout[:, ibin] = out
correct[ibin] = np.mean(out == ytest)
return correct, testout
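# --- Illustrative sketch (not from the original module) ---
# The "subspace" used above is a PCA fitted on class-averaged activity (one
# mean pattern per label), so each decoder sees at most nclasses components.
# Minimal reproduction of that construction on fake data:
def _demo_class_average_subspace():
    import numpy as np
    from sklearn.decomposition import PCA
    rng = np.random.default_rng(0)
    X = rng.standard_normal((40, 12))            # trials x neurons (delay-averaged)
    y = rng.integers(0, 4, size=40)              # 4 labels
    Xsub = np.stack([X[y == label].mean(0) for label in np.unique(y)])
    subspace = PCA().fit(Xsub)                   # at most 4 components survive
    return subspace.transform(X)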
def crosstemp_decoding_subspace_xval_ridge(X, y, indtrain, indtest,
alpha=1, indsub=None, mask=None):
'''Cross-temporal decoding with cross-validation in subspace
Parameters
----------
X : np.array<trials * bins * neurons> - data
y : np.array<trials> - targets
    indtrain : np.array - the indices of the training trials
    indtest : np.array - the indices of the testing trials
alpha : float - the L2 "ridge" regularization parameter
    indsub : np.array - the indices of the trials used to define the subspace.
They must be different from the training and testing indices.
mask : np.array<nbins> of bool - a mask to select which bins are used to
define the subspace. E.g.: np.array([False, False, True, True, False])
here only bins 2 and 3 are used to define the subspace.
Returns
-------
    correct : np.array<bins * bins>
        The mean decoding accuracy for each pair of train and test bins
    testout : np.array<test trials * bins * bins>
        The output of the classifier for each pair of train and test bins
'''
assert len(set(indtrain) & set(indtest)) == 0
if indsub is not None:
subspace = True
assert len(set(indtrain) & set(indsub)) == 0
assert len(set(indtest) & set(indsub)) == 0
else:
subspace = False
nbins = X.shape[1]
labels = np.unique(y)
testout = np.empty((len(indtest), nbins, nbins))
correct = np.empty((nbins, nbins))
Xtrain, Xtest = X[indtrain], X[indtest]
ytrain, ytest = y[indtrain], y[indtest]
### Split subspace and training if subspace indices are provided
if subspace:
if mask is None:
mask = range(nbins)
ysub = y[indsub]
Xsub = X[:, mask][indsub].mean(1) # Averaging over time bins
Xsub = np.stack([Xsub[ysub == label].mean(0) for label in labels])
subspace = PCA()
subspace.fit(Xsub)
### A decoder is trained on each bin, and each decoder is tested on every bins
for itrain in range(nbins):
if subspace:
Xbintrain = subspace.transform(Xtrain[:, itrain])
else:
Xbintrain = Xtrain[:, itrain]
model = OneVsOneClassifier(RidgeClassifier(alpha=alpha, solver='cholesky'))
model.fit(Xbintrain, ytrain)
# The test data is reshaped to test all the bins in a single shot (much faster)
Xtest_ = Xtest.reshape(Xtest.shape[0] * Xtest.shape[1], Xtest.shape[2])
if subspace:
Xtest_ = subspace.transform(Xtest_)
preds = model.predict(Xtest_)
# The output is reshaped to the original shape of the test data
preds = preds.reshape(len(indtest), nbins)
accs = (preds == ytest[:, None]).mean(0)
testout[:, itrain, :] = preds
correct[itrain, :] = accs
return correct, testout
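# --- Illustrative sketch (not from the original module) ---
# Cross-temporal decoding in a nutshell: fit one classifier per training bin
# and score it on every bin, giving a bins x bins accuracy matrix whose
# diagonal is ordinary decoding and whose off-diagonal probes code stability.
# (No train/test split here, unlike the real function above.)
def _demo_cross_temporal_matrix():
    import numpy as np
    from sklearn.linear_model import RidgeClassifier
    rng = np.random.default_rng(0)
    ntrials, nbins, nneurons = 60, 5, 8
    y = rng.integers(0, 2, size=ntrials)
    X = rng.standard_normal((ntrials, nbins, nneurons)) + y[:, None, None]
    acc = np.empty((nbins, nbins))
    for itrain in range(nbins):
        clf = RidgeClassifier(alpha=1.0).fit(X[:, itrain], y)
        for itest in range(nbins):
            acc[itrain, itest] = clf.score(X[:, itest], y)
    return acc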
def job_CT(X, y, trainind, testind, population,
perm_seed=None, **kwargs):
poparray = np.array(population)
newX = X[:, :, poparray]
    if perm_seed is not None:
np.random.seed(perm_seed)
np.random.shuffle(newX)
result = crosstemp_decoding_subspace_xval_ridge(newX, y, trainind, testind, **kwargs)[0]
return result
def xval_job(data, client, pop, subspace=True, **kwargs):
X, y, ntrials = data
kfold = KFold(n_splits=5)
twofold = KFold(n_splits=2)
acc_fold = []
for trainind, testind in kfold.split(range(ntrials)):
if subspace:
acc_sub_split = []
for ridgeindind, subindind in twofold.split(trainind):
ridgeind, subind = trainind[ridgeindind], trainind[subindind]
                acc_sub_split.append(job_CT(X, y, ridgeind, testind, pop,
                                            subind=subind, **kwargs))
acc_fold.append(combine_xval_folds(acc_sub_split))
else:
acc_fold.append(job_CT(X, y, trainind, testind, pop, **kwargs))
accuracy = combine_xval_folds(acc_fold)
return accuracy
def submit_xval_jobs(data, client, pop,
subspace=True, alpha=1, **kwargs):
Xfut, yfut, ntrials = data
kfold = KFold(n_splits=5)
twofold = KFold(n_splits=2)
acc_fold = []
for trainind, testind in kfold.split(range(ntrials)):
if subspace:
acc_sub_split = []
for ridgeindind, subindind in twofold.split(trainind):
ridgeind, subind = trainind[ridgeindind], trainind[subindind]
# acc_fold.append(job_CT(Xfut.result(), yfut.result(), ridgeind, testind, pop,
# subind=subind, alpha=alpha.result(), **kwargs))
acc_sub_split.append(client.submit(job_CT, Xfut, yfut, ridgeind, testind, pop,
subind=subind, alpha=alpha, **kwargs))
acc_fold.append(client.submit(combine_xval_folds, acc_sub_split))
else:
acc_fold.append(client.submit(job_CT, Xfut, yfut, trainind, testind, pop, **kwargs))
accuracy = client.submit(combine_xval_folds, acc_fold)
return accuracy
def ensemble_mean_acc(data, client, *args, **kwargs):
'''Get stable score for a given ensemble
Parameters
----------
data : tuple (X, y, ntrials) - The data for decoding and the number of
trials in a tuple
client : the dask client
**kwargs : keyword arguments, contains
alpha : float - the L2 "ridge" regularization parameter
subspace : bool - whether to use a subspace
pop : np.array of int - the indices of the neurons included
Returns
-------
Stable score
'''
accuracy = submit_xval_jobs(data, client, *args, **kwargs)
return client.submit(get_acc_mean, accuracy)
def ensemble_dynamic_acc(data, client, *args, **kwargs):
'''Get "locality" or dynamic score for a given ensemble
Parameters
----------
data : tuple (X, y, ntrials) - The data for decoding and the number of
trials in a tuple
client : the dask client
**kwargs : keyword arguments, contains
alpha : float - the L2 "ridge" regularization parameter
subspace : bool - whether to use a subspace
pop : np.array of int - the indices of the neurons included
Returns
-------
Stable score
'''
accuracy = submit_xval_jobs(data, client, *args, **kwargs)
return client.submit(get_CT_score, accuracy)
def CT_permutations(data, client, subspace, alpha, nperms):
Xfut, yfut, ntrials, delaymask, pop = data
kfold = KFold(n_splits=10)
    permseeds = np.random.randint(0, 10**9, nperms)
import argparse
from itertools import count
import gym
import gym.spaces
import scipy.optimize
import numpy as np
import math
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from models.old_models import *
from replay_memory import Memory
from running_state import ZFilter
from torch.autograd import Variable
from trpo import trpo_step
from utils import *
from loss import *
import time
import swimmer
import walker
import halfcheetah
import pickle
torch.utils.backcompat.broadcast_warning.enabled = True
torch.utils.backcompat.keepdim_warning.enabled = True
torch.set_default_tensor_type('torch.DoubleTensor')
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
parser.add_argument('--gamma', type=float, default=0.995, metavar='G',
help='discount factor (default: 0.995)')
parser.add_argument('--env-name', type=str, default="Reacher-v1", metavar='G',
help='name of the environment to run')
parser.add_argument('--tau', type=float, default=0.97, metavar='G',
help='gae (default: 0.97)')
parser.add_argument('--l2-reg', type=float, default=1e-3, metavar='G',
help='l2 regularization regression (default: 1e-3)')
parser.add_argument('--max-kl', type=float, default=1e-2, metavar='G',
help='max kl value (default: 1e-2)')
parser.add_argument('--damping', type=float, default=1e-1, metavar='G',
help='damping (default: 1e-1)')
parser.add_argument('--seed', type=int, default=1111, metavar='N',
                    help='random seed (default: 1111)')
parser.add_argument('--batch-size', type=int, default=5000, metavar='N',
help='size of a single batch')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
                    help='interval between training status logs (default: 1)')
parser.add_argument('--eval-interval', type=int, default=1, metavar='N',
                    help='interval between evaluations (default: 1)')
parser.add_argument('--num-epochs', type=int, default=500, metavar='N',
help='number of epochs to train an expert')
parser.add_argument('--hidden-dim', type=int, default=64, metavar='H',
help='the size of hidden layers')
parser.add_argument('--lr', type=float, default=1e-3, metavar='L',
help='learning rate')
parser.add_argument('--vf-iters', type=int, default=30, metavar='V',
                    help='number of value function optimization iterations per policy optimization step')
parser.add_argument('--vf-lr', type=float, default=3e-4, metavar='V',
help='learning rate of value network')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--xml', default=None, help='the xml configuration file')
parser.add_argument('--demo_files', nargs='+', help='the environment used for test')
parser.add_argument('--ratios', nargs='+', type=float, help='the ratio of demos to load')
parser.add_argument('--eval_epochs', type=int, default=10, help='the epochs for evaluation')
parser.add_argument('--save_path', help='the path to save model')
parser.add_argument('--feasibility_model', default=None, help='the path to the feasibility model')
parser.add_argument('--mode', help='the mode of feasibility')
parser.add_argument('--discount', type=float, default=0.9, help='the discount factor')
parser.add_argument('--distance_normalizer', type=float, default=5., help='the normalization factor for the distance')
args = parser.parse_args()
if args.seed == 1111:
log_file = open('log/'+args.save_path.split('/')[-1].split('.pth')[0]+'.txt', 'w')
save_path = args.save_path
else:
log_file = open('log/'+args.save_path.split('/')[-1].split('.pth')[0]+'_seed_{}.txt'.format(args.seed), 'w')
save_path = args.save_path.replace('.pth', '_seed_{}.pth'.format(args.seed))
env = gym.make(args.env_name, xml_file=args.xml, exclude_current_positions_from_observation=False)
f_env = gym.make(args.env_name, xml_file=args.xml, exclude_current_positions_from_observation=False)
num_inputs = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
def load_demos(demo_files, ratios):
state_files = []
trajs = []
traj_traj_id = []
traj_id = 0
pair_traj_id = []
init_obs = []
for i in range(len(demo_files)):
state_pairs = []
demo_file = demo_files[i]
raw_demos = pickle.load(open(demo_file, 'rb'))
use_num = int(len(raw_demos['obs'])*ratios[i])
current_state = raw_demos['obs'][0:use_num]
next_state = raw_demos['next_obs'][0:use_num]
trajs += [np.array(traj) for traj in current_state]
if 'InvertedDoublePendulum' in str(type(env.env)):
init_obs += raw_demos['init_obs']
traj_traj_id += [i]*len(current_state)
for j in range(len(current_state)):
if 'Ant' in args.env_name:
state_pairs.append(np.concatenate([np.array(current_state[j])[:,2:], np.array(next_state[j])[:,2:]], axis=1))
pair_traj_id.append(np.array([traj_id]*np.array(current_state[j]).shape[0]))
else:
state_pairs.append(np.concatenate([np.array(current_state[j]), np.array(next_state[j])], axis=1))
pair_traj_id.append(np.array([traj_id]*np.array(current_state[j]).shape[0]))
traj_id += 1
state_files.append(np.concatenate(state_pairs, axis=0))
return state_files, trajs, np.concatenate(pair_traj_id, axis=0), np.array(traj_traj_id), init_obs
env.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
def compute_feasibility_pair(expert_trajs, models, f_env):
all_distance = []
for index in range(len(expert_trajs)):
expert_traj = expert_trajs[index]
model = models[index]
batch_size = 64
batch_num = (expert_traj.shape[0]-1)//batch_size + 1
with torch.no_grad():
for i in range(batch_num):
f_env.reset()
action_mean, _, action_std = model(torch.from_numpy(expert_traj[i*batch_size:(i+1)*batch_size, 2:num_inputs]))
action = torch.normal(action_mean, action_std).cpu().numpy()
next_states = []
for j in range(action_mean.shape[0]):
f_env.set_observation(expert_traj[i*batch_size+j])
next_state, _, _, _ = f_env.step(action[j])
next_states.append(next_state)
next_states = np.array(next_states)
distance = np.linalg.norm(expert_traj[i*batch_size:(i+1)*batch_size, num_inputs:] - next_states, ord=2, axis=1)
all_distance.append(distance)
all_distance = np.concatenate(all_distance, axis=0)
feasibility = np.exp(-all_distance/3.)
return feasibility
def compute_feasibility_traj(expert_trajs, traj_traj_id, models, f_env, init_obs):
all_distance = []
for index in range(len(expert_trajs)):
if index >= 4:
index = index % 2 + 2
all_distance.append([])
expert_traj = expert_trajs[index]
model = models[traj_traj_id[index]]
with torch.no_grad():
f_env.reset()
f_env.set_observation(expert_traj[0])
state0 = expert_traj[0]
state = expert_traj[0]
for j in range(expert_traj.shape[0]-1):
action_mean, _, action_std = model(torch.from_numpy(np.concatenate([state, state0], axis=0)).unsqueeze(0))
action = action_mean.cpu().numpy()
next_state, _, _, _ = f_env.step(action)
state = next_state
all_distance[-1].append(np.linalg.norm(expert_traj[j+1] - next_state, ord=2, axis=0)*(args.discount**j))
all_distance[-1] = np.sum(all_distance[-1])
all_distance = np.array(all_distance)
all_distance = (all_distance + np.max(-all_distance))/args.distance_normalizer
all_distance[all_distance>50] = 50.
    feasibility = np.exp(-all_distance)
import argparse
from sklearn.metrics import roc_curve, auc
import tensorflow as tf
from tensorflow.python.ops.check_ops import assert_greater_equal_v2
import load_data
from tqdm import tqdm
import numpy as np
import pandas as pd
from math import e as e_VALUE
import tensorflow.keras.backend as Keras_backend
from sklearn.ensemble import RandomForestClassifier
from scipy.special import bdtrc
def func_CallBacks(Dir_Save=''):
mode = 'min'
monitor = 'val_loss'
# checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath= Dir_Save + '/best_model_weights.h5', monitor=monitor , verbose=1, save_best_only=True, mode=mode)
# Reduce_LR = tf.keras.callbacks.ReduceLROnPlateau(monitor=monitor, factor=0.1, min_delta=0.005 , patience=10, verbose=1, save_best_only=True, mode=mode , min_lr=0.9e-5 , )
# CSVLogger = tf.keras.callbacks.CSVLogger(Dir_Save + '/results.csv', separator=',', append=False)
EarlyStopping = tf.keras.callbacks.EarlyStopping( monitor = monitor,
min_delta = 0,
patience = 4,
verbose = 1,
mode = mode,
baseline = 0,
restore_best_weights = True)
return [EarlyStopping] # [checkpointer , EarlyStopping , CSVLogger]
def reading_terminal_inputs():
parser = argparse.ArgumentParser()
parser.add_argument("--epoch" , help="number of epochs")
parser.add_argument("--bsize" , help="batch size")
parser.add_argument("--max_sample" , help="maximum number of training samples")
    parser.add_argument("--naug" , help="number of augmentations")
    """ Xception VGG16 VGG19 DenseNet201
ResNet50 ResNet50V2 ResNet101 DenseNet169
ResNet101V2 ResNet152 ResNet152V2 DenseNet121
InceptionV3 InceptionResNetV2 MobileNet MobileNetV2
if keras_version > 2.4
EfficientNetB0 EfficientNetB1 EfficientNetB2 EfficientNetB3
EfficientNetB4 EfficientNetB5 EfficientNetB6 EfficientNetB7 """
parser.add_argument("--architecture_name", help='architecture name')
args = parser.parse_args()
epoch = int(args.epoch) if args.epoch else 3
number_augmentation = int(args.naug) if args.naug else 3
bsize = int(args.bsize) if args.bsize else 100
max_sample = int(args.max_sample) if args.max_sample else 1000
architecture_name = str(args.architecture_name) if args.architecture_name else 'DenseNet121'
return epoch, bsize, max_sample, architecture_name, number_augmentation
def mlflow_settings():
"""
RUN UI with postgres and HPC:
REMOTE postgres server:
# connecting to remote server through ssh tunneling
ssh -L 5000:localhost:5432 <EMAIL>
# using the mapped port and localhost to view the data
mlflow ui --backend-store-uri postgresql://artinmajdi:1234@localhost:5000/chest_db --port 6789
RUN directly from GitHub or show experiments/runs list:
export MLFLOW_TRACKING_URI=http://127.0.0.1:5000
mlflow runs list --experiment-id <id>
mlflow run --no-conda --experiment-id 5 -P epoch=2 https://github.com/artinmajdi/mlflow_workflow.git -v main
mlflow run mlflow_workflow --no-conda --experiment-id 5 -P epoch=2
PostgreSQL server style
server = f'{dialect_driver}://{username}:{password}@{ip}/{database_name}' """
postgres_connection_type = { 'direct': ('5432', 'data7-db1.cyverse.org'),
'ssh-tunnel': ('5000', 'localhost')
}
port, host = postgres_connection_type['ssh-tunnel'] # 'direct' , 'ssh-tunnel'
username = "artinmajdi"
password = '<PASSWORD>'
database_name = "chest_db_v2"
dialect_driver = 'postgresql'
server = f'{dialect_driver}://{username}:{password}@{host}:{port}/{database_name}'
Artifacts = { 'hpc': 'sftp://mohammadsmajdi@file<EMAIL>iz<EMAIL>.<EMAIL>:/home/u29/mohammadsmajdi/projects/mlflow/artifact_store',
'data7_db1': 'sftp://[email protected]:/home/artinmajdi/mlflow_data/artifact_store'} # :temp2_data7_b
return server, Artifacts['data7_db1']
def architecture(architecture_name: str='DenseNet121', input_shape: list=[224,224,3], num_classes: int=14):
input_tensor=tf.keras.layers.Input(input_shape)
if architecture_name == 'custom':
model = tf.keras.layers.Conv2D(4, kernel_size=(3,3), activation='relu')(input_tensor)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Conv2D(8, kernel_size=(3,3), activation='relu')(model)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Conv2D(16, kernel_size=(3,3), activation='relu')(model)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Flatten()(model)
model = tf.keras.layers.Dense(32, activation='relu')(model)
model = tf.keras.layers.Dense(num_classes , activation='softmax')(model)
return tf.keras.models.Model(inputs=model.input, outputs=[model])
    else:
        """ Xception VGG16 VGG19 DenseNet201
ResNet50 ResNet50V2 ResNet101 DenseNet169
ResNet101V2 ResNet152 ResNet152V2 DenseNet121
InceptionV3 InceptionResNetV2 MobileNet MobileNetV2
if keras_version > 2.4
EfficientNetB0 EfficientNetB1 EfficientNetB2 EfficientNetB3
EfficientNetB4 EfficientNetB5 EfficientNetB6 EfficientNetB7 """
pooling='avg'
weights='imagenet'
include_top=False
if architecture_name == 'xception': model_architecture = tf.keras.applications.Xception
elif architecture_name == 'VGG16': model_architecture = tf.keras.applications.VGG16
elif architecture_name == 'VGG19': model_architecture = tf.keras.applications.VGG19
elif architecture_name == 'ResNet50': model_architecture = tf.keras.applications.ResNet50
elif architecture_name == 'ResNet50V2': model_architecture = tf.keras.applications.ResNet50V2
elif architecture_name == 'ResNet101': model_architecture = tf.keras.applications.ResNet101
elif architecture_name == 'ResNet101V2': model_architecture = tf.keras.applications.ResNet101V2
elif architecture_name == 'ResNet152': model_architecture = tf.keras.applications.ResNet152
elif architecture_name == 'ResNet152V2': model_architecture = tf.keras.applications.ResNet152V2
elif architecture_name == 'InceptionV3': model_architecture = tf.keras.applications.InceptionV3
elif architecture_name == 'InceptionResNetV2': model_architecture = tf.keras.applications.InceptionResNetV2
elif architecture_name == 'MobileNet': model_architecture = tf.keras.applications.MobileNet
elif architecture_name == 'MobileNetV2': model_architecture = tf.keras.applications.MobileNetV2
elif architecture_name == 'DenseNet121': model_architecture = tf.keras.applications.DenseNet121
elif architecture_name == 'DenseNet169': model_architecture = tf.keras.applications.DenseNet169
elif architecture_name == 'DenseNet201': model_architecture = tf.keras.applications.DenseNet201
        elif int(tf.keras.__version__.split('.')[1]) >= 4:  # keras >= 2.4 (per-character parse broke on e.g. "2.10")
if architecture_name == 'EfficientNetB0': model_architecture = tf.keras.applications.EfficientNetB0
elif architecture_name == 'EfficientNetB1': model_architecture = tf.keras.applications.EfficientNetB1
elif architecture_name == 'EfficientNetB2': model_architecture = tf.keras.applications.EfficientNetB2
elif architecture_name == 'EfficientNetB3': model_architecture = tf.keras.applications.EfficientNetB3
elif architecture_name == 'EfficientNetB4': model_architecture = tf.keras.applications.EfficientNetB4
elif architecture_name == 'EfficientNetB5': model_architecture = tf.keras.applications.EfficientNetB5
elif architecture_name == 'EfficientNetB6': model_architecture = tf.keras.applications.EfficientNetB6
elif architecture_name == 'EfficientNetB7': model_architecture = tf.keras.applications.EfficientNetB7
model = model_architecture( weights = weights,
include_top = include_top,
input_tensor = input_tensor,
input_shape = input_shape,
pooling = pooling) # ,classes=num_classes
KK = tf.keras.layers.Dense( num_classes, activation='sigmoid', name='predictions' )(model.output)
return tf.keras.models.Model(inputs=model.input,outputs=KK)
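# --- Illustrative usage (not from the original module) ---
# Building a 14-way sigmoid classifier head on an ImageNet backbone with the
# defaults of architecture(); the first call downloads the pretrained weights.
def _demo_build_backbone():
    return architecture(architecture_name='DenseNet121',
                        input_shape=[224, 224, 3], num_classes=14)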
def weighted_bce_loss(W):
def func_loss(y_true,y_pred):
NUM_CLASSES = y_pred.shape[1]
loss = 0
for d in range(NUM_CLASSES):
y_true = tf.cast(y_true, tf.float32)
mask = tf.keras.backend.cast( tf.keras.backend.not_equal(y_true[:,d], -5),
tf.keras.backend.floatx() )
loss += W[d]*tf.keras.losses.binary_crossentropy( y_true[:,d] * mask,
y_pred[:,d] * mask )
return tf.divide( loss, tf.cast(NUM_CLASSES,tf.float32) )
return func_loss
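# --- Illustrative sketch (not from the original module) ---
# weighted_bce_loss zeroes out entries labelled -5 (missing annotations)
# before applying a per-class weight; tiny check on a 2-sample, 2-class
# batch (the weights below are arbitrary demo values):
def _demo_weighted_bce():
    loss_fn = weighted_bce_loss(W=[1.0, 2.0])
    y_true = tf.constant([[1.0, -5.0], [0.0, 1.0]])  # -5 marks a missing label
    y_pred = tf.constant([[0.9, 0.5], [0.2, 0.8]])
    return loss_fn(y_true, y_pred)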
def optimize(dir, train_dataset, valid_dataset, epochs, Info, architecture_name):
# architecture
model = architecture( architecture_name = architecture_name,
input_shape = list(Info.target_size) + [3] ,
num_classes = len(Info.pathologies) )
model.compile( optimizer = tf.keras.optimizers.Adam(learning_rate=0.001),
loss = weighted_bce_loss(Info.class_weights), # tf.keras.losses.binary_crossentropy
metrics = [tf.keras.metrics.binary_accuracy] )
# optimization
history = model.fit( train_dataset,
validation_data = valid_dataset,
epochs = epochs,
steps_per_epoch = Info.steps_per_epoch,
validation_steps = Info.validation_steps,
verbose = 1,
use_multiprocessing = True) # ,callbacks=func_CallBacks(dir + '/model')
# saving the optimized model
model.save( dir + '/model/model.h5',
overwrite = True,
include_optimizer = False )
return model
def evaluate(dir: str, dataset: str='chexpert', batch_size: int=1000, model=tf.keras.Model()):
# Loading the data
Data, Info = load_data.load_chest_xray( dir = dir,
dataset = dataset,
batch_size = batch_size,
mode = 'test' )
score = measure_loss_acc_on_test_data( generator = Data.generator['test'],
model = model,
pathologies = Info.pathologies )
return score
def measure_loss_acc_on_test_data(generator, model, pathologies):
# Looping over all test samples
score_values = {}
NUM_CLASSES = len(pathologies)
generator.reset()
for j in tqdm(range(len(generator.filenames))):
x_test, y_test = next(generator)
full_path, x,y = generator.filenames[j] , x_test[0,...] , y_test[0,...]
x,y = x[np.newaxis,:] , y[np.newaxis,:]
# Estimating the loss & accuracy for instance
eval = model.evaluate(x=x, y=y,verbose=0,return_dict=True)
# predicting the labels for instance
pred = model.predict(x=x,verbose=0)
# Measuring the loss for each class
loss_per_class = [ tf.keras.losses.binary_crossentropy(y[...,d],pred[...,d]) for d in range(NUM_CLASSES)]
# saving all the infos
        score_values[full_path] = {
            'full_path': full_path,
            'loss_avg': eval['loss'],
            'acc_avg': eval['binary_accuracy'],
            'pred': pred[0],
            'pred_binary': pred[0] > 0.5,
            'truth': y[0] > 0.5,
            'loss': np.array(loss_per_class),
            'pathologies': pathologies}
# converting the outputs into panda dataframe
df = pd.DataFrame.from_dict(score_values).T
# resetting the index to integers
df.reset_index(inplace=True)
# # dropping the old index column
df = df.drop(['index'],axis=1)
return df
class Parent_Child():
def __init__(self, subj_info: pd.DataFrame.dtypes={}, technique: int=0, tuning_variables: dict={}):
"""
subject_info = {'pred':[], 'loss':[], 'pathologies':['Edema','Cardiomegaly',...]}
1. After creating a class:
SPC = Parent_Child(loss_dict, pred_dict, technique)
2. Update the parent child relationship:
SPC.set_parent_child_relationship(parent_name1, child_name_list1)
SPC.set_parent_child_relationship(parent_name2, child_name_list2)
3. Then update the loss and probabilities
SPC.update_loss_pred()
4. In order to see the updated loss and probabilities use below
loss_new_list = SPC.loss_dict_weighted or SPC.loss_list_weighted
pred_new_list = SPC.pred_dict_weighted or SPC.predlist_weighted
IMPORTANT NOTE:
If there are more than 2 generation; it is absolutely important to enter the subjects in order of seniority
gen1: grandparent (gen1)
gen1_subjx_children: parent (gen2)
gen2_subjx_children: child (gen3)
SPC = Parent_Child(loss_dict, pred_dict, technique)
SPC.set_parent_child_relationship(gen1_subj1, gen1_subj1_children)
SPC.set_parent_child_relationship(gen1_subj2, gen1_subj2_children)
. . .
SPC.set_parent_child_relationship(gen2_subj1, gen2_subj1_children)
SPC.set_parent_child_relationship(gen2_subj2, gen2_subj2_children)
. . .
SPC.update_loss_pred()
"""
self.subj_info = subj_info
self.technique = technique
self.all_parents: dict = {}
self.tuning_variables = tuning_variables
self.loss = subj_info.loss
self.pred = subj_info.pred
self.truth = subj_info.truth
self._convert_inputs_list_to_dict()
def _convert_inputs_list_to_dict(self):
self.loss_dict = {disease:self.subj_info.loss[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.pred_dict = {disease:self.subj_info.pred[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.truth_dict = {disease:self.subj_info.truth[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.loss_dict_weighted = self.loss_dict
self.pred_dict_weighted = self.pred_dict
def set_parent_child_relationship(self, parent_name: str='parent_name', child_name_list: list=[]):
self.all_parents[parent_name] = child_name_list
def update_loss_pred(self):
"""
techniques:
            1: coefficient = (1 + parent_loss)
            2: coefficient = (2 * parent_pred)
            3: coefficient = (2 * parent_pred)
1: loss_new = loss_old * coefficient if parent_pred < 0.5 else loss_old
2: loss_new = loss_old * coefficient if parent_pred < 0.5 else loss_old
3. loss_new = loss_old * coefficient
"""
for parent_name in self.all_parents:
self._update_loss_for_children(parent_name)
self._convert_outputs_to_list()
def _convert_outputs_to_list(self):
self.loss_new = np.array([self.loss_dict_weighted[disease] for disease in self.subj_info.pathologies])
self.pred_new = np.array([self.pred_dict_weighted[disease] for disease in self.subj_info.pathologies])
def _update_loss_for_children(self, parent_name: str='parent_name'):
parent_loss = self.loss_dict_weighted[parent_name]
parent_pred = self.pred_dict_weighted[parent_name]
parent_truth = self.truth_dict[parent_name]
TV = self.tuning_variables[ self.technique ]
if TV['mode'] == 'truth': parent_truth_pred = parent_truth
elif TV['mode'] == 'pred': parent_truth_pred = parent_pred
else: parent_truth_pred = 1.0
if self.technique == 1: coefficient = TV['weight'] * parent_loss + TV['bias']
elif self.technique == 2: coefficient = TV['weight'] * parent_truth_pred + TV['bias']
elif self.technique == 3: coefficient = TV['weight'] * parent_truth_pred + TV['bias']
for child_name in self.all_parents[parent_name]:
new_child_loss = self._measure_new_child_loss(coefficient, parent_name, child_name)
self.loss_dict_weighted[child_name] = new_child_loss
self.pred_dict_weighted[child_name] = 1 - np.power(e_VALUE , -new_child_loss)
self.pred_dict[child_name] = 1 - np.power(e_VALUE , -self.loss_dict[child_name])
def _measure_new_child_loss(self, coefficient: float=0.0, parent_name: str='parent_name', child_name: str='child_name'):
TV = self.tuning_variables[ self.technique ]
parent_pred = self.pred_dict_weighted[parent_name]
parent_truth = self.truth_dict[parent_name]
if TV['mode'] == 'truth': loss_activated = (parent_truth < 0.5 )
elif TV['mode'] == 'pred': loss_activated = (parent_pred < TV['parent_pred_threshold'] )
else: loss_activated = True
old_child_loss = self.loss_dict_weighted[child_name]
if self.technique == 1: new_child_loss = old_child_loss * coefficient if loss_activated else old_child_loss
elif self.technique == 2: new_child_loss = old_child_loss * coefficient if loss_activated else old_child_loss
elif self.technique == 3: new_child_loss = old_child_loss * coefficient
return new_child_loss
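# --- Illustrative sketch (not from the original module) ---
# The classes above recover a probability from a per-class BCE-style loss via
# pred = 1 - exp(-loss), the inverse of loss = -log(1 - pred); scaling the
# loss by a coefficient therefore moves the recovered probability with it.
def _demo_loss_to_pred():
    loss = -np.log(1 - 0.2)                      # the loss of a pred of 0.2
    return [1 - np.exp(-loss * c) for c in (1.0, 2.0)]   # [0.2, 0.36]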
class Measure_InterDependent_Loss_Aim1_1(Parent_Child):
def __init__(self,score: pd.DataFrame.dtypes={}, technique: int=0, tuning_variables: dict={}):
score['loss_new'] = score['loss']
score['pred_new'] = score['pred']
self.score = score
self.technique = technique
for subject_ix in tqdm(self.score.index):
Parent_Child.__init__(self, subj_info=self.score.loc[subject_ix], technique=technique, tuning_variables=tuning_variables)
self.set_parent_child_relationship(parent_name='Lung Opacity' , child_name_list=['Pneumonia', 'Atelectasis','Consolidation','Lung Lesion', 'Edema'])
self.set_parent_child_relationship(parent_name='Enlarged Cardiomediastinum', child_name_list=['Cardiomegaly'])
self.update_loss_pred()
self.score.loss_new.loc[subject_ix] = self.loss_new
self.score.pred_new.loc[subject_ix] = self.pred_new
def apply_new_loss_techniques_aim1_1(pathologies: list=[], score: pd.DataFrame.dtypes={}, tuning_variables: dict={}):
L = len(pathologies)
    accuracies = np.zeros((4, L))
"""
Determine continuum based on continuum mask
and fit best radial velocity to observation
"""
import logging
import warnings
import emcee
import numpy as np
from scipy.constants import speed_of_light
from scipy.interpolate import splev, splrep
from scipy.optimize import least_squares
from scipy.signal import correlate
from tqdm import tqdm
from .iliffe_vector import Iliffe_vector
from .sme_synth import SME_DLL
logger = logging.getLogger(__name__)
c_light = speed_of_light * 1e-3 # speed of light in km/s
class ContinuumNormalizationAbstract:
def __init__(self):
pass
def __call__(self, sme, x_syn, y_syn, segments, rvel=0):
raise NotImplementedError
def apply(self, wave, smod, cwave, cscale, segments):
return apply_continuum(wave, smod, cwave, cscale, self.cscale_type, segments)
class ContinuumNormalizationMask(ContinuumNormalizationAbstract):
def __call__(self, sme, x_syn, y_syn, segments, rvel=0):
"""
Fit a polynomial to the spectrum points marked as continuum
The degree of the polynomial fit is determined by sme.cscale_flag
Parameters
----------
sme : SME_Struct
input sme structure with sme.sob, sme.wave, and sme.mask
        segments : int
index of the wavelength segment to use, or -1 when dealing with the whole spectrum
Returns
-------
cscale : array of size (ndeg + 1,)
polynomial coefficients of the continuum fit, in numpy order, i.e. largest exponent first
"""
if segments < 0:
return sme.cscale
if "spec" not in sme or "wave" not in sme:
# If there is no observation, we have no continuum scale
warnings.warn("Missing data for continuum fit")
cscale = [1]
elif sme.cscale_flag in ["none", -3]:
cscale = [1]
elif sme.cscale_flag in ["fix", -1, -2]:
# Continuum flag is set to no continuum
cscale = sme.cscale[segments]
else:
# fit a line to the continuum points
ndeg = sme.cscale_degree
# Extract points in this segment
x, y = sme.wave, sme.spec
if "mask" in sme:
m = sme.mask
else:
m = sme.spec.copy()
m[:] = sme.mask_value["line"]
if "uncs" in sme:
u = sme.uncs
else:
u = sme.spec.copy()
u[:] = 1
x, y, m, u = x[segments], y[segments], m[segments], u[segments]
# Set continuum mask
            if np.all(m != sme.mask_values["continuum"]):
# code to calculate fundamental stellar parameters and distances using
# a "direct method", i.e. adopting a fixed reddening map and bolometric
# corrections
import astropy.units as units
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.interpolate import RegularGridInterpolator
import pdb
def distance_likelihood(plx, plxe, ds):
"""Distance Likelihood
Likelihood of distance given measured parallax
Args:
plx (float): parallax
plxe (float): parallax uncertainty
ds (array): distance in parsecs
Returns:
array: likelihood (not log-likelihood)
"""
lh = ((1.0/(np.sqrt(2.0*np.pi)*plxe))
* np.exp( (-1.0/(2.0*plxe**2))*(plx - 1.0/ds)**2))
return lh
def distance_prior(ds, L):
"""Distance prior
    Exponentially decreasing volume density prior
Returns:
array: prior probability (not log-prior)
"""
prior = ds**2/(2.0*L**3.0)*np.exp(-ds/L)
return prior
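# --- Illustrative sketch (not from the original module) ---
# Combining the two pieces above: an (unnormalised) distance posterior is the
# parallax likelihood times the exponentially decreasing volume-density
# prior, here for a 10 +/- 1 mas parallax and the L = 1350 pc scale used in
# stparas below:
def _demo_distance_posterior():
    ds = np.arange(1.0, 1000.0, 1.0)             # parsecs
    post = distance_likelihood(0.01, 0.001, ds) * distance_prior(ds, 1350.0)
    return ds[np.argmax(post)]                   # posterior mode, ~100 pc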
def stparas(input, dnumodel=-99, bcmodel=-99, dustmodel=-99, dnucor=-99,
useav=-99, plot=0, band='k', ext=-99):
# IAU XXIX Resolution, Mamajek et al. (2015)
r_sun = 6.957e10
gconst = 6.67408e-8
gm = 1.3271244e26
m_sun = gm/gconst
rho_sun = m_sun/(4./3.*np.pi*r_sun**3)
g_sun = gconst*m_sun/r_sun**2.
# solar constants
numaxsun = 3090.
dnusun = 135.1
teffsun = 5777.
Msun = 4.74 # NB this is fixed to MESA BCs!
# assumed uncertainty in bolometric corrections
err_bc=0.02
# assumed uncertainty in extinction
err_ext=0.02
# object containing output values
out = resdata()
## extinction coefficients
extfactors=ext
if (len(band) == 4):
bd=band[0:1]
else:
bd=band[0:2]
######################################
# case 1: input is parallax + colors #
######################################
#with h5py.File(bcmodel,'r') as h5:
teffgrid = bcmodel['teffgrid'][:]
logggrid = bcmodel['logggrid'][:]
fehgrid = bcmodel['fehgrid'][:]
avgrid = bcmodel['avgrid'][:]
bc_band = bcmodel['bc_'+bd][:]
if ((input.plx > 0.)):
# load up bolometric correction grid
# only K-band for now
points = (teffgrid,logggrid,fehgrid,avgrid)
values = bc_band
interp = RegularGridInterpolator(points,values)
### Monte Carlo starts here
# number of samples
nsample = int(1e5)
# length scale for exp decreasing vol density prior in pc
L = 1350.0
# maximum distance to sample (in pc)
maxdis = 1e5
# get a rough maximum and minimum distance
tempdis = 1.0/input.plx
tempdise = input.plxe/input.plx**2
maxds = tempdis + 5.0*tempdise
minds = tempdis - 5.0*tempdise
        ds = np.arange(1.0, maxdis, 1.0)
import numpy as np
from numpy import sin, cos, tan, pi, arcsin, arctan
from functools import lru_cache
import torch
from torch import nn
from torch.nn.parameter import Parameter
# Calculate kernels of SphereCNN
@lru_cache(None)
def get_xy(delta_phi, delta_theta):
return np.array([
[
(-tan(delta_theta), 1/cos(delta_theta)*tan(delta_phi)),
(0, tan(delta_phi)),
(tan(delta_theta), 1/cos(delta_theta)*tan(delta_phi)),
],
[
(-tan(delta_theta), 0),
(1, 1),
(tan(delta_theta), 0),
],
[
            (tan(delta_theta), -1/cos(delta_theta)*tan(delta_phi)),
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 17 14:45:00 2017
@author: shenda
class order: ['A', 'N', 'O', '~']
"""
import numpy as np
from sklearn import ensemble
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from OptF import OptF
from copy import deepcopy
import xgboost as xgb
import ReadData
import random
from collections import Counter
class MyXGB(object):
"""
    bottom basic classifier, a wrapper for xgboost
the proba order is ['N', 'A', 'O', '~']
"""
def __init__(self, n_estimators=5000,
max_depth=10,
subsample=0.85,
colsample_bytree=0.85,
min_child_weight=4,
num_round = 500):
self.param = {'learning_rate':0.1, 'eta':0.1, 'silent':1,
'objective':'multi:softprob', 'num_class': 4}
self.bst = None
self.num_round = num_round
self.pred = None
my_seed = random.randint(0, 1000)
self.param['n_estimators'] = n_estimators
self.param['max_depth'] = max_depth
self.param['subsample'] = subsample
self.param['colsample_bytree'] = colsample_bytree
self.param['min_child_weight'] = min_child_weight
# self.param['random_state'] = my_seed
self.param['seed'] = my_seed
self.param['n_jobs'] = -1
print(self.param.items())
print(self.num_round)
def fit(self, train_data, train_label):
train_label = ReadData.Label2Index(train_label)
dtrain = xgb.DMatrix(train_data, label=train_label)
self.bst = xgb.train(self.param, dtrain, num_boost_round=self.num_round)
def predict_prob(self, test_data):
dtest = xgb.DMatrix(test_data)
self.pred = self.bst.predict(dtest)
return self.pred
def predict(self, test_data):
pred_prob = self.predict_prob(test_data)
pred_num = np.argmax(pred_prob, axis=1)
pred = ReadData.Index2Label(pred_num)
return pred
def get_importance(self):
return self.bst.get_score(importance_type='gain')
def plot_importance(self):
xgb.plot_importance(self.bst)
class MyLR(object):
"""
    Top level classifier, a wrapper for Logistic Regression
"""
def __init__(self):
self.clf = LogisticRegression()
def fit(self,
train_qrs_data, train_qrs_label):
train_data =np.array(train_qrs_data)
train_label = train_qrs_label
self.clf.fit(train_data, train_label)
def predict(self, test_qrs_data):
test_data = np.array(test_qrs_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_qrs_data):
test_qrs_data = np.array(test_qrs_data)
if test_qrs_data.ndim == 1:
test_qrs_data = np.expand_dims(np.array(test_qrs_data), axis=0)
test_data = np.array(test_qrs_data)
return list(list(self.clf.predict_proba(test_data))[0])
else:
test_data = np.array(test_qrs_data)
return self.clf.predict_proba(test_data)
class MyKNN(object):
"""
    bottom basic classifier
    supports unequal-length vectors
"""
def __init__(self, n_neighbors=3):
self.n_neighbors = n_neighbors
self.train_data = None
self.train_label = None
self.labels = ['N', 'A', 'O', '~']
# self.thresh = [0.5, 0.3, 0.2, ]
def fit(self, train_data, train_label):
self.train_data = np.array(train_data)
self.train_label = np.array(train_label)
def dist(self, vec1, vec2):
res = 0.0
if len(vec1) <= len(vec2):
vec1 = np.r_[vec1, np.zeros(len(vec2)-len(vec1))]
else:
vec2 = np.r_[vec2, np.zeros(len(vec1)-len(vec2))]
dist_num = np.linalg.norm(vec1 - vec2)
return dist_num
def predict_prob(self, test_data):
test_data = np.array(test_data)
pred = []
for i in test_data:
tmp_dist_list = []
tmp_pred = []
for j in self.train_data:
tmp_dist_list.append(self.dist(i, j))
pred_n_neighbors = self.train_label[np.argsort(tmp_dist_list)[:self.n_neighbors]]
pred_counter = Counter(pred_n_neighbors)
# print(pred_counter)
for ii in self.labels:
tmp_pred.append(pred_counter[ii])
pred.append(tmp_pred)
return pred
def predict(self, test_data):
pred = self.predict_prob(test_data)
pred_label = []
for i in pred:
pred_label.append(self.labels[np.argsort(i)[-1]])
return pred_label
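# --- Illustrative sketch (not from the original module) ---
# MyKNN.dist compares unequal-length feature vectors by zero-padding the
# shorter one before taking the Euclidean norm; standalone version:
def _demo_padded_distance():
    vec1, vec2 = np.array([1.0, 2.0]), np.array([1.0, 2.0, 2.0])
    if len(vec1) <= len(vec2):
        vec1 = np.r_[vec1, np.zeros(len(vec2) - len(vec1))]
    else:
        vec2 = np.r_[vec2, np.zeros(len(vec1) - len(vec2))]
    return np.linalg.norm(vec1 - vec2)           # 2.0 here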
class MyGBDT(object):
"""
    bottom basic classifier, a wrapper for GradientBoostingClassifier
"""
def __init__(self):
self.clf = ensemble.GradientBoostingClassifier()
def fit(self,
train_data, train_label):
train_data =np.array(train_data)
self.clf.fit(train_data, train_label)
def predict(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
test_data = np.array(test_data)
return list(list(self.clf.predict_proba(test_data))[0])
else:
test_data = np.array(test_data)
return self.clf.predict_proba(test_data)
class MyExtraTrees(object):
"""
    bottom basic classifier, a wrapper for ExtraTreesClassifier
"""
def __init__(self):
self.clf = ensemble.ExtraTreesClassifier(n_estimators=100)
def fit(self,
train_data, train_label):
train_data =np.array(train_data)
self.clf.fit(train_data, train_label)
def predict(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
test_data = np.array(test_data)
return list(list(self.clf.predict_proba(test_data))[0])
else:
test_data = np.array(test_data)
return self.clf.predict_proba(test_data)
class MyAdaBoost(object):
"""
    bottom basic classifier, a wrapper for AdaBoostClassifier
"""
def __init__(self):
self.clf = ensemble.AdaBoostClassifier(n_estimators=100, learning_rate=0.1)
def fit(self,
train_data, train_label):
train_data =np.array(train_data)
self.clf.fit(train_data, train_label)
def predict(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
test_data = np.array(test_data)
return list(list(self.clf.predict_proba(test_data))[0])
else:
test_data = np.array(test_data)
return self.clf.predict_proba(test_data)
class MyRF(object):
"""
    bottom basic classifier, a wrapper for Random Forest
"""
def __init__(self):
self.clf = ensemble.RandomForestClassifier(
n_estimators=1000, n_jobs=-1)
def fit(self,
train_data, train_label):
train_data =np.array(train_data)
self.clf.fit(train_data, train_label)
def predict(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_data):
test_data = np.array(test_data)
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
test_data = np.array(test_data)
return list(list(self.clf.predict_proba(test_data))[0])
else:
test_data = np.array(test_data)
return self.clf.predict_proba(test_data)
class MyOptF(object):
"""
bottom basic classifier, a warpper for Opt F-score
"""
def __init__(self, alpha=0.5, epochs=10):
self.clf = OptF(alpha, epochs)
def fit(self, train_data, train_label):
self.clf.fit(train_data, deepcopy(train_label))
def predict(self, test_data):
return list(self.clf.predict(test_data))
def predict_prob(self, test_data):
return self.clf.predict_prob(test_data)
class RF(object):
"""
Top level classifier, a warpper for Random Forest
    uses long_feature and qrs_feature separately, thus no longer used
deprecated
"""
def __init__(self):
self.clf = ensemble.RandomForestClassifier()
def fit(self,
train_long_data, train_long_label,
train_qrs_data, train_qrs_label):
train_data = np.c_[np.array(train_long_data), np.array(train_qrs_data)]
train_label = train_long_label
self.clf.fit(train_data, train_label)
def predict(self, test_long_data, test_qrs_data):
test_data = np.c_[np.array(test_long_data), np.array(test_qrs_data)]
if test_data.ndim == 1:
test_data = np.expand_dims(np.array(test_data), axis=0)
return list(self.clf.predict(test_data))
def predict_prob(self, test_long_data, test_qrs_data):
test_long_data = np.array(test_long_data)
test_qrs_data = np.array(test_qrs_data)
if test_long_data.ndim == 1 or test_qrs_data.ndim == 1:
test_long_data = np.expand_dims(np.array(test_long_data), axis=0)
test_qrs_data = np.expand_dims(np.array(test_qrs_data), axis=0)
test_data = np.c_[np.array(test_long_data), np.array(test_qrs_data)]
else:
test_data = np.c_[np.array(test_long_data), np.array(test_qrs_data)]
return list(list(self.clf.predict_proba(test_data))[0])
class RFSimp(object):
"""
    Top level classifier, a wrapper for Random Forest
use long/qrs feature
deprecated
"""
def __init__(self):
self.clf = ensemble.RandomForestClassifier()
def fit(self,
train_qrs_data, train_qrs_label):
        train_data = np.array(train_qrs_data)
#!/usr/bin/env python
from sklearn import metrics
from sklearn.preprocessing import label_binarize
from pandas_ml import ConfusionMatrix
import pickle
import numpy as np
def generate_metrics(y_true, y_pred, scores, class_labels):
# One-hot encode the truth (for multiclass metrics, if needed)
y_true_onehot = label_binarize(y_true, classes=class_labels)
m = {}
m["sklearn"] = {}
m["pandas-ml"] = {}
# Calculate accuracy
m["sklearn"]["acc"] = metrics.accuracy_score(y_true, y_pred)
# Confusion matrix
m["sklearn"]["confmat"] = metrics.confusion_matrix(
y_true, y_pred, labels=class_labels
)
# Generate classification report
m["sklearn"]["report"] = metrics.classification_report(
y_true, y_pred, target_names=class_labels
)
# Get AUCs
auc_indiv = metrics.roc_auc_score(y_true_onehot, scores, average=None)
m["sklearn"]["auc_indiv"] = auc_indiv
    m["sklearn"]["auc_avg"] = np.mean(auc_indiv)
from bitstring import Bits
def split_n(n, s):
return [ s[x:x+n] for x in range(0, len(s), n)]
def encode_floats(floats):
packed = map(lambda x: Bits(float=x, length=32).bin, floats)
return ''.join(packed)
def decode_floats(s):
return list(map(lambda s: Bits(bin=s).float, split_n(32, s)))
def flip_bit(bit):
    return '1' if bit == '0' else '0'
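# --- Illustrative sketch (not from the original module) ---
# encode_floats packs each float into a 32-bit IEEE-754 bit string and
# decode_floats inverts it, so the round trip is lossless for values exactly
# representable in float32:
def _demo_float_roundtrip():
    values = [1.5, -0.25, 2.0]
    assert decode_floats(encode_floats(values)) == values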
import numpy as np
from neural_net import NeuralNetwork
class GeneticAlgorithm:
def __init__(self, population_size, crossover_rate, mutation_rate, n_layers):
self.population_size = population_size
self.crossover_rate = crossover_rate
self.mutation_rate = mutation_rate
self.n_layers = n_layers
self.population = self.generate_initial_population()
# these must be normalized at all times
self.fitnesses = self.zero_fitnesses()
def zero_fitnesses(self):
        return np.array([1.0 / self.population_size] * self.population_size)
import time
import shutil
import os
import sys
import subprocess
import math
import pickle
import glob
import json
from copy import deepcopy
import warnings
import random
from multiprocessing import Pool
# import emukit.multi_fidelity as emf
# from emukit.model_wrappers.gpy_model_wrappers import GPyMultiOutputWrapper
# from emukit.multi_fidelity.convert_lists_to_array import convert_x_list_to_array, convert_xy_lists_to_arrays
try:
moduleName = "emukit"
import emukit.multi_fidelity as emf
from emukit.model_wrappers.gpy_model_wrappers import GPyMultiOutputWrapper
from emukit.multi_fidelity.convert_lists_to_array import convert_x_list_to_array, convert_xy_lists_to_arrays
moduleName = "pyDOE"
from pyDOE import lhs
moduleName = "GPy"
import GPy as GPy
moduleName = "scipy"
from scipy.stats import lognorm, norm
moduleName = "numpy"
import numpy as np
error_tag=False
except:
error_tag=True
class GpFromModel(object):
def __init__(self, work_dir, run_type, os_type, inp, errlog):
t_init = time.time()
self.errlog = errlog
self.work_dir = work_dir
self.os_type = os_type
self.run_type = run_type
#
# From external READ JSON FILE
#
rv_name = list()
self.g_name = list()
x_dim = 0
y_dim = 0
for rv in inp['randomVariables']:
rv_name = rv_name + [rv['name']]
x_dim += 1
if x_dim == 0:
msg = 'Error reading json: RV is empty'
errlog.exit(msg)
for g in inp['EDP']:
if g['length']==1: # scalar
self.g_name = self.g_name + [g['name']]
y_dim += 1
else: # vector
for nl in range(g['length']):
self.g_name = self.g_name + ["{}_{}".format(g['name'],nl+1)]
y_dim += 1
if y_dim == 0:
msg = 'Error reading json: EDP(QoI) is empty'
errlog.exit(msg)
# Accuracy is also sensitive to the range of X
self.id_sim = 0
self.x_dim = x_dim
self.y_dim = y_dim
self.rv_name = rv_name
self.do_predictive = False
automate_doe = False
surrogateInfo = inp["UQ_Method"]["surrogateMethodInfo"]
try:
self.do_parallel = surrogateInfo["parallelExecution"]
except:
self.do_parallel = True
if self.do_parallel:
if self.run_type.lower() == 'runninglocal':
self.n_processor = os.cpu_count()
from multiprocessing import Pool
self.pool = Pool(self.n_processor)
else:
# Always
from mpi4py import MPI
from mpi4py.futures import MPIPoolExecutor
self.world = MPI.COMM_WORLD
self.pool = MPIPoolExecutor()
self.n_processor = self.world.Get_size()
#self.n_processor =20
print("nprocessor :")
print(self.n_processor)
#self.cal_interval = 5
self.cal_interval = self.n_processor
else:
self.pool = 0
self.cal_interval = 5
if surrogateInfo["method"] == "Sampling and Simulation":
self.do_mf = False
do_sampling = True
do_simulation = True
self.use_existing = surrogateInfo["existingDoE"]
if self.use_existing:
self.inpData = os.path.join(work_dir, "templatedir/inpFile.in")
self.outData = os.path.join(work_dir, "templatedir/outFile.in")
thr_count = surrogateInfo['samples'] # number of samples
if surrogateInfo["advancedOpt"]:
self.doe_method = surrogateInfo["DoEmethod"]
if surrogateInfo["DoEmethod"] == "None":
do_doe = False
user_init = thr_count
else:
do_doe = True
user_init = surrogateInfo["initialDoE"]
else:
self.doe_method = "pareto" #default
do_doe = True
user_init = -100
elif surrogateInfo["method"] == "Import Data File":
self.do_mf = False
do_sampling = False
do_simulation = not surrogateInfo["outputData"]
self.doe_method = "None" # default
do_doe = False
# self.inpData = surrogateInfo['inpFile']
self.inpData = os.path.join(work_dir, "templatedir/inpFile.in")
if not do_simulation:
# self.outData = surrogateInfo['outFile']
self.outData = os.path.join(work_dir, "templatedir/outFile.in")
elif surrogateInfo["method"] == "Import Multi-fidelity Data File":
self.do_mf = True
self.doe_method = "None" # default
self.hf_is_model = surrogateInfo['HFfromModel']
self.lf_is_model = surrogateInfo['LFfromModel']
            if self.hf_is_model:
self.use_existing_hf = surrogateInfo["existingDoE_HF"]
self.samples_hf = surrogateInfo["samples_HF"]
if self.use_existing_hf:
self.inpData = os.path.join(work_dir, "templatedir/inpFile_HF.in")
self.outData = os.path.join(work_dir, "templatedir/outFile_HF.in")
else:
self.inpData_hf = os.path.join(work_dir, "templatedir/inpFile_HF.in")
self.outData_hf = os.path.join(work_dir, "templatedir/outFile_HF.in")
self.X_hf = read_txt(self.inpData_hf, errlog)
self.Y_hf = read_txt(self.outData_hf, errlog)
if self.X_hf.shape[0] != self.Y_hf.shape[0]:
msg = 'Error reading json: high fidelity input and output files should have the same number of rows'
errlog.exit(msg)
if self.lf_is_model:
self.use_existing_lf = surrogateInfo["existingDoE_LF"]
self.samples_lf = surrogateInfo["samples_LF"]
if self.use_existing_lf:
self.inpData = os.path.join(work_dir, "templatedir/inpFile_LF.in")
self.outData = os.path.join(work_dir, "templatedir/outFile_LF.in")
else:
self.inpData_lf = os.path.join(work_dir, "templatedir/inpFile_LF.in")
self.outData_lf = os.path.join(work_dir, "templatedir/outFile_LF.in")
self.X_lf = read_txt(self.inpData_lf, errlog)
self.Y_lf = read_txt(self.outData_lf, errlog)
if self.X_lf.shape[0] != self.Y_lf.shape[0]:
msg = 'Error reading json: low fidelity input and output files should have the same number of rows'
errlog.exit(msg)
if (not self.hf_is_model) and self.lf_is_model:
self.mf_case = "data-model"
do_sampling = True
do_simulation = True
do_doe = surrogateInfo["doDoE"]
self.use_existing = self.use_existing_lf
if self.lf_is_model:
if self.use_existing_lf:
self.inpData = self.inpData_lf
                        self.outData = self.outData_lf
else:
self.inpData = self.inpData_lf
self.outData = self.outData_lf
if do_doe:
user_init = -100
else:
user_init = self.samples_lf
thr_count = self.samples_lf # number of samples
elif self.hf_is_model and (not self.lf_is_model):
self.mf_case = "model-data"
do_sampling = True
do_simulation = True
do_doe = surrogateInfo["doDoE"]
self.use_existing = self.use_existing_hf
if self.hf_is_model:
if self.use_existing_hf:
self.inpData = self.inpData_hf
                        self.outData = self.outData_hf
else:
self.inpData = self.inpData_hf
self.outData = self.outData_hf
if do_doe:
user_init = -100
else:
user_init = self.samples_hf
thr_count = self.samples_hf # number of samples
elif self.hf_is_model and self.lf_is_model:
self.mf_case = "model-model"
do_sampling = True
do_simulation = True
do_doe = surrogateInfo["doDoE"]
elif (not self.hf_is_model) and (not self.lf_is_model):
self.mf_case = "data-data"
do_sampling = False
do_simulation = False
do_doe = False
self.inpData = self.inpData_lf
self.outData = self.outData_lf
else:
msg = 'Error reading json: either select "Import Data File" or "Sampling and Simulation"'
errlog.exit(msg)
if surrogateInfo["advancedOpt"]:
self.do_logtransform = surrogateInfo["logTransform"]
kernel = surrogateInfo["kernel"]
do_linear = surrogateInfo["linear"]
nugget_opt = surrogateInfo["nuggetOpt"]
try:
self.nuggetVal = np.array(json.loads("[{}]".format(surrogateInfo["nuggetString"])))
except json.decoder.JSONDecodeError:
msg = 'Error reading json: improper format of nugget values/bounds. Provide nugget values/bounds of each QoI with comma delimiter'
errlog.exit(msg)
if self.nuggetVal.shape[0]!=self.y_dim and self.nuggetVal.shape[0]!=0 :
msg = 'Error reading json: Number of nugget quantities ({}) does not match # QoIs ({})'.format(self.nuggetVal.shape[0],self.y_dim)
errlog.exit(msg)
if nugget_opt == "Fixed Values":
for Vals in self.nuggetVal:
if (not np.isscalar(Vals)):
msg = 'Error reading json: provide nugget values of each QoI with comma delimiter'
errlog.exit(msg)
elif nugget_opt == "Fixed Bounds":
for Bous in self.nuggetVal:
if (np.isscalar(Bous)):
msg = 'Error reading json: provide nugget bounds of each QoI in brackets with comma delimiter, e.g. [0.0,1.0],[0.0,2.0],...'
errlog.exit(msg)
elif (isinstance(Bous,list)):
msg = 'Error reading json: provide both lower and upper bounds of nugget'
errlog.exit(msg)
elif Bous.shape[0]!=2:
msg = 'Error reading json: provide nugget bounds of each QoI in brackets with comma delimiter, e.g. [0.0,1.0],[0.0,2.0],...'
errlog.exit(msg)
elif Bous[0]>Bous[1]:
msg = 'Error reading json: the lower bound of a nugget value should be smaller than its upper bound'
errlog.exit(msg)
# if self.do_logtransform:
# mu = 0
# sig2 = self.nuggetVal
# #median = np.exp(mu)
# #mean = np.exp(mu + sig2/2)
# self.nuggetVal = np.exp(2*mu + sig2)*(np.exp(sig2)-1)
else:
self.do_logtransform = False
kernel = 'Matern 5/2'
do_linear = False
#do_nugget = True
nugget_opt = "optimize"
if not self.do_mf:
if do_simulation:
femInfo = inp["fem"]
self.inpFile = femInfo["inputFile"]
self.postFile = femInfo["postprocessScript"]
self.appName = femInfo["program"]
#
# get x points
#
if do_sampling:
thr_NRMSE = surrogateInfo["accuracyLimit"]
thr_t = surrogateInfo["timeLimit"] * 60
np.random.seed(surrogateInfo['seed'])
random.seed(surrogateInfo['seed'])
self.xrange = np.empty((0, 2), float)
for rv in inp['randomVariables']:
if "lowerbound" not in rv:
msg = 'Error in input RV: all RV should be set to Uniform distribution'
errlog.exit(msg)
self.xrange = np.vstack((self.xrange, [rv['lowerbound'], rv['upperbound']]))
self.len = np.abs(np.diff(self.xrange).T[0])
if sum(self.len == 0) > 0:
msg = 'Error in input RV: training range of RV should be greater than 0'
errlog.exit(msg)
#
# Read existing samples
#
if self.use_existing:
X_tmp = read_txt(self.inpData,errlog)
Y_tmp = read_txt(self.outData,errlog)
n_ex = X_tmp.shape[0]
if self.do_mf:
if X_tmp.shape[1] != self.X_hf.shape[1]:
msg = 'Error importing input data: dimension inconsistent: high fidelity data have {} RV column(s) but low fidelity model have {}.'.format(
self.X_hf.shape[1], X_tmp.shape[1])
errlog.exit(msg)
if Y_tmp.shape[1] != self.Y_hf.shape[1]:
msg = 'Error importing input data: dimension inconsistent: high fidelity data have {} QoI column(s) but low fidelity model have {}.'.format(
self.Y_hf.shape[1], Y_tmp.shape[1])
errlog.exit(msg)
if X_tmp.shape[1] != x_dim:
msg = 'Error importing input data: dimension inconsistent: have {} RV(s) but have {} column(s).'.format(
x_dim, X_tmp.shape[1])
errlog.exit(msg)
if Y_tmp.shape[1] != y_dim:
msg = 'Error importing input data: dimension inconsistent: have {} QoI(s) but have {} column(s).'.format(
y_dim, Y_tmp.shape[1])
errlog.exit(msg)
if n_ex != Y_tmp.shape[0]:
msg = 'Error importing input data: numbers of samples of inputs ({}) and outputs ({}) are inconsistent'.format(n_ex, Y_tmp.shape[0])
errlog.exit(msg)
else:
n_ex = 0
                if user_init == 0:
                    #msg = 'Error reading json: # of initial DoE should be greater than 0'
                    #errlog.exit(msg)
                    user_init = -1
X_tmp = np.zeros((0, x_dim))
Y_tmp = np.zeros((0, y_dim))
if user_init < 0:
n_init_ref = min(4 * x_dim, thr_count + n_ex - 1, 500)
if self.do_parallel:
n_init_ref = int(np.ceil(n_init_ref/self.n_processor)*self.n_processor) # Let's not waste resource
if n_init_ref > n_ex:
n_init = n_init_ref - n_ex
else:
n_init = 0
else:
n_init = user_init
n_iter = thr_count - n_init
def FEM_batch(Xs, id_sim):
return run_FEM_batch(Xs, id_sim, self.rv_name, self.do_parallel, self.y_dim, self.os_type, self.run_type, self.pool, t_init, thr_t)
# check validity of datafile
if n_ex > 0:
#Y_test, self.id_sim = FEM_batch(X_tmp[0, :][np.newaxis], self.id_sim)
# TODO : Fix this
print(X_tmp[0, :][np.newaxis].shape)
X_test, Y_test ,self.id_sim= FEM_batch(X_tmp[0, :][np.newaxis] ,self.id_sim)
if np.sum(abs((Y_test - Y_tmp[0, :][np.newaxis]) / Y_test) > 0.01, axis=1) > 0:
msg = 'Consistency check failed. Your data is not consistent to your model response.'
errlog.exit(msg)
if n_init>0:
n_init -= 1
else:
n_iter -= 1
#
# generate initial samples
#
if n_init>0:
U = lhs(x_dim, samples=(n_init))
X = np.vstack([X_tmp, np.zeros((n_init, x_dim))])
for nx in range(x_dim):
X[n_ex:n_ex+n_init, nx] = U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0]) + self.xrange[nx, 0]
else:
X = X_tmp
if sum(abs(self.len / self.xrange[:, 0]) < 1.e-7) > 1:
msg = 'Error : upperbound and lowerbound should not be the same'
errlog.exit(msg)
n_iter = thr_count - n_init
else:
n_ex = 0
thr_NRMSE = 0.02 # default
thr_t = float('inf')
#
# Read sample locations from directory
#
X = read_txt(self.inpData,errlog)
if self.do_mf:
if X.shape[1] != self.X_hf.shape[1]:
msg = 'Error importing input data: dimension inconsistent: high fidelity data have {} RV column(s) but low fidelity model have {}.'.format(
self.X_hf.shape[1], X.shape[1])
errlog.exit(msg)
if X.shape[1] != x_dim:
msg = 'Error importing input data: Number of dimension inconsistent: have {} RV(s) but {} column(s).' \
.format(x_dim, X.shape[1])
errlog.exit(msg)
self.xrange = np.vstack([np.min(X, axis=0), np.max(X, axis=0)]).T
self.len = 2 * np.std(X, axis=0)
thr_count = X.shape[0]
n_init = thr_count
n_iter = 0
# give error
if thr_count <= 2:
msg = 'Number of samples should be greater than 2.'
errlog.exit(msg)
if do_doe:
ac = 1 # pre-screening time = time*ac
ar = 1 # cluster
n_candi = min(200 * x_dim, 2000) # candidate points
n_integ = min(200 * x_dim, 2000) # integration points
if user_init > thr_count:
msg = 'Number of DoE cannot exceed total number of simulation'
errlog.exit(msg)
else:
ac = 1 # pre-screening time = time*ac
ar = 1 # cluster
n_candi = 1 # candidate points
n_integ = 1 # integration points
user_init = thr_count
#
# get y points
#
if do_simulation:
#
# SimCenter workflow setting
#
if os.path.exists('{}/workdir.1'.format(work_dir)):
is_left = True
idx = 0
def change_permissions_recursive(path, mode):
for root, dirs, files in os.walk(path, topdown=False):
for dir in [os.path.join(root, d) for d in dirs]:
os.chmod(dir, mode)
for file in [os.path.join(root, f) for f in files]:
os.chmod(file, mode)
while is_left:
idx = idx + 1
try:
if os.path.exists('{}/workdir.{}/workflow_driver.bat'.format(work_dir, idx)):
#os.chmod('{}/workdir.{}'.format(work_dir, idx), 777)
change_permissions_recursive('{}/workdir.{}'.format(work_dir, idx), 0o777)
my_dir = '{}/workdir.{}'.format(work_dir, idx)
os.chmod(my_dir, 0o777)
shutil.rmtree(my_dir)
#shutil.rmtree('{}/workdir.{}'.format(work_dir, idx), ignore_errors=False, onerror=handleRemoveReadonly)
except Exception as ex:
print(ex)
is_left = True
break
print("Cleaned the working directory")
else:
print("Work directory is clean")
if os.path.exists('{}/dakotaTab.out'.format(work_dir)):
os.remove('{}/dakotaTab.out'.format(work_dir))
if os.path.exists('{}/inputTab.out'.format(work_dir)):
os.remove('{}/inputTab.out'.format(work_dir))
if os.path.exists('{}/outputTab.out'.format(work_dir)):
os.remove('{}/outputTab.out'.format(work_dir))
if os.path.exists('{}/SimGpModel.pkl'.format(work_dir)):
os.remove('{}/SimGpModel.pkl'.format(work_dir))
if os.path.exists('{}/verif.out'.format(work_dir)):
os.remove('{}/verif.out'.format(work_dir))
# func = self.__run_FEM(X,self.id_sim, self.rv_name)
#
# Generate initial samples
#
t_tmp = time.time()
X_fem, Y_fem ,self.id_sim= FEM_batch(X[n_ex:, :],self.id_sim)
Y = np.vstack((Y_tmp,Y_fem))
X = np.vstack((X[0:n_ex, :],X_fem))
t_sim_all = time.time() - t_tmp
if automate_doe:
self.t_sim_each = t_sim_all / n_init
else:
self.t_sim_each = float("inf")
#
# Generate predictive samples
#
if self.do_predictive:
n_pred = 100
Xt = np.zeros((n_pred, x_dim))
U = lhs(x_dim, samples=n_pred)
for nx in range(x_dim):
Xt[:, nx] = U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0]) + self.xrange[nx, 0]
#
# Yt = np.zeros((n_pred, y_dim))
# for ns in range(n_pred):
# Yt[ns, :],self.id_sim = run_FEM(Xt[ns, :][np.newaxis],self.id_sim, self.rv_name)
Yt = np.zeros((n_pred, y_dim))
Xt, Yt ,self.id_sim= FEM_batch(Xt,self.id_sim)
else:
#
# READ SAMPLES FROM DIRECTORY
#
Y = read_txt(self.outData,errlog)
if self.do_mf:
if Y.shape[1] != self.Y_hf.shape[1]:
msg = 'Error importing input data: dimension inconsistent: high fidelity data have {} QoI column(s) but low fidelity model have {}.'.format(
self.Y_hf.shape[1], Y.shape[1])
errlog.exit(msg)
if Y.shape[1] != y_dim:
msg = 'Error importing input data: Number of dimension inconsistent: have {} QoI(s) but {} column(s).' \
.format(y_dim, Y.shape[1])
errlog.exit(msg)
if X.shape[0] != Y.shape[0]:
msg = 'Error importing input data: numbers of samples of inputs ({}) and outputs ({}) are inconsistent'.format(X.shape[0], Y.shape[0])
errlog.exit(msg)
thr_count = 0
self.t_sim_each = float("inf")
#
# GP function
#
if kernel == 'Radial Basis':
kr = GPy.kern.RBF(input_dim=x_dim, ARD=True)
elif kernel == 'Exponential':
kr = GPy.kern.Exponential(input_dim=x_dim, ARD=True)
elif kernel == 'Matern 3/2':
kr = GPy.kern.Matern32(input_dim=x_dim, ARD=True)
elif kernel == 'Matern 5/2':
kr = GPy.kern.Matern52(input_dim=x_dim, ARD=True)
if do_linear:
kr = kr + GPy.kern.Linear(input_dim=x_dim, ARD=True)
if not self.do_mf:
kg = kr
self.m_list = list()
for i in range(y_dim):
self.m_list = self.m_list + [GPy.models.GPRegression(X, Y[:, i][np.newaxis].transpose(), kernel=kg.copy(),normalizer=True)]
for parname in self.m_list[i].parameter_names():
if parname.endswith('lengthscale'):
exec('self.m_list[i].' + parname + '=self.len')
else:
kgs = emf.kernels.LinearMultiFidelityKernel([kr.copy(), kr.copy()])
if not self.hf_is_model:
if not X.shape[1]==self.X_hf.shape[1]:
msg = 'Error importing input data: dimension of low ({}) and high ({}) fidelity models (datasets) are inconsistent'.format(X.shape[1], self.X_hf.shape[1])
errlog.exit(msg)
if not self.lf_is_model:
if not X.shape[1]==self.X_lf.shape[1]:
                    msg = 'Error importing input data: dimension of low ({}) and high ({}) fidelity models (datasets) are inconsistent'.format(X.shape[1], self.X_lf.shape[1])
errlog.exit(msg)
if self.mf_case == 'data-model' or self.mf_case=='data-data':
X_list, Y_list = emf.convert_lists_to_array.convert_xy_lists_to_arrays([X, self.X_hf], [Y, self.Y_hf])
elif self.mf_case == 'model-data':
X_list, Y_list = emf.convert_lists_to_array.convert_xy_lists_to_arrays([self.X_lf, X], [self.Y_lf, Y])
self.m_list = list()
for i in range(y_dim):
self.m_list = self.m_list + [GPyMultiOutputWrapper(emf.models.GPyLinearMultiFidelityModel(X_list, Y_list, kernel=kgs.copy(), n_fidelities=2), 2, n_optimization_restarts=15)]
#
# Verification measures
#
self.NRMSE_hist = np.zeros((1, y_dim), float)
self.NRMSE_idx = np.zeros((1, 1), int)
#leng_hist = np.zeros((1, self.m_list[0]._param_array_.shape[0]), int)
if self.do_predictive:
self.NRMSE_pred_hist = np.empty((1, y_dim), float)
#
# Run DoE
#
break_doe = False
print("======== RUNNING GP DoE ===========")
exit_code = 'count' # num iter
i = 0
x_new = np.zeros((0,x_dim))
n_new = 0
doe_off = False # false if true
while not doe_off:
t = time.time()
if self.doe_method == "random":
do_cal = True
elif self.doe_method == "pareto":
do_cal = True
elif np.mod(i, self.cal_interval) == 0:
do_cal = True
else:
do_cal = False
t_tmp = time.time()
[x_new, self.m_list, err, idx, Y_cv, Y_cv_var] = self.__design_of_experiments(X, Y, ac, ar, n_candi,
n_integ, self.m_list,
do_cal, nugget_opt, do_doe)
t_doe = time.time() - t_tmp
print('DoE Time: {:.2f} s'.format(t_doe))
if automate_doe:
if t_doe > self.t_sim_each:
break_doe = True
print('========>> DOE OFF')
n_left = n_iter - i
break
if not self.do_mf:
NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y)
else:
if self.mf_case == 'data-model' or self.mf_case == 'data-data':
NRMSE_val = self.__normalized_mean_sq_error(Y_cv, self.Y_hf)
elif self.mf_case == 'model-data' :
NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y)
self.NRMSE_hist = np.vstack((self.NRMSE_hist, np.array(NRMSE_val)))
self.NRMSE_idx = np.vstack((self.NRMSE_idx, i))
if self.do_predictive:
Yt_pred = np.zeros((n_pred, y_dim))
for ny in range(y_dim):
y_pred_tmp, dummy = self.__predict(self.m_list[ny],Xt)
Yt_pred[:, ny] = y_pred_tmp.transpose()
if self.do_logtransform:
Yt_pred = np.exp(Yt_pred)
NRMSE_pred_val = self.__normalized_mean_sq_error(Yt_pred, Yt)
self.NRMSE_pred_hist = np.vstack((self.NRMSE_pred_hist, np.array(NRMSE_pred_val)))
if self.id_sim >= thr_count:
n_iter = i
exit_code = 'count'
doe_off = True
if not do_cal:
break_doe = False
n_left = 0
break
if np.max(NRMSE_val) < thr_NRMSE:
n_iter = i
exit_code = 'accuracy'
doe_off = True
if not do_cal:
break_doe = False
n_left = n_iter - i
break
if time.time() - t_init > thr_t - self.calib_time:
n_iter = i
exit_code = 'time'
doe_off = True
if not do_cal:
break_doe = False
n_left = n_iter - i
break
n_new = x_new.shape[0]
if not (n_new + self.id_sim < n_init + n_iter +1):
n_new = n_init + n_iter - self.id_sim
x_new = x_new[0:n_new, :]
i = self.id_sim + n_new
# y_new = np.zeros((n_new, y_dim))
# for ny in range(n_new):
# y_new[ny, :],self.id_sim = run_FEM(x_new[ny, :][np.newaxis],self.id_sim, self.rv_name)
x_new, y_new, self.id_sim = FEM_batch(x_new,self.id_sim)
#print(">> {:.2f} s".format(time.time() - t_init))
X = np.vstack([X, x_new])
Y = np.vstack([Y, y_new])
print("======== RUNNING GP Calibration ===========")
# not used
if break_doe:
X_tmp = np.zeros((n_left, x_dim))
Y_tmp = np.zeros((n_left, y_dim))
U = lhs(x_dim, samples=n_left)
for nx in range(x_dim):
# X[:,nx] = np.random.uniform(xrange[nx,0], xrange[nx,1], (1, n_init))
X_tmp[:, nx] = U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0]) + self.xrange[nx, 0]
X_tmp, Y_tmp, self.id_sim = FEM_batch(X_tmp,self.id_sim)
# for ns in np.arange(n_left):
# Y_tmp[ns, :],self.id_sim = run_FEM(X_tmp[ns, :][np.newaxis],self.id_sim, self.rv_name)
# print(">> {:.2f} s".format(time.time() - t_init))
# if time.time() - t_init > thr_t - self.calib_time:
# X_tmp = X_tmp[:ns, :]
# Y_tmp = Y_tmp[:ns, :]
# break
X = np.vstack((X, X_tmp))
Y = np.vstack((Y, Y_tmp))
do_doe = False
# if not do_doe:
# exit_code = 'count'
#
# do_cal = True
# self.t_sim_each = float("inf") # so that calibration is not terminated in the middle
# self.m_list, Y_cv, Y_cv_var = self.__design_of_experiments(X, Y, 1, 1, 1, 1, self.m_list, do_cal,
# do_nugget, do_doe)
# if not self.do_mf:
# NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y)
# else:
# NRMSE_val = self.__normalized_mean_sq_error(Y_cv, self.Y_hf)
sim_time = time.time() - t_init
n_samp = Y.shape[0]
# import matplotlib.pyplot as plt
# if self.x_dim==1:
# if self.do_mf:
# for ny in range(y_dim):
#
# x_plot = np.linspace(0, 1, 200)[:, np.newaxis]
# X_plot = convert_x_list_to_array([x_plot, x_plot])
# X_plot_l = X_plot[:len(x_plot)]
# X_plot_h = X_plot[len(x_plot):]
#
# lf_mean_lin_mf_model, lf_var_lin_mf_model = self.__predict(self.m_list[ny],X_plot_l)
# lf_std_lin_mf_model = np.sqrt(lf_var_lin_mf_model)
# hf_mean_lin_mf_model, hf_var_lin_mf_model = self.__predict(self.m_list[ny],X_plot_h)
# hf_std_lin_mf_model = np.sqrt(hf_var_lin_mf_model)
#
#
# plt.plot(x_plot, lf_mean_lin_mf_model);
# plt.plot(x_plot, hf_mean_lin_mf_model, '-');
# plt.plot(X, Y[:,ny], 'x');
# plt.plot(self.X_hf,self.Y_hf[:,ny], 'x');
# plt.show()
# else:
# for ny in range(y_dim):
# x_plot = np.linspace(0, 1, 200)[:, np.newaxis]
#
# hf_mean_lin_mf_model, hf_var_lin_mf_model = self.__predict(self.m_list[ny], x_plot)
#
# plt.plot(x_plot, hf_mean_lin_mf_model, '-');
# plt.plot(X, Y[:, ny], 'x');
# plt.show()
#
#
# plt.plot(Y_cv[:,0], self.Y_hf[:,0], 'x'); plt.show()
# plt.show()
# plt.plot(Y_cv[:,1], Y[:,1], 'x')
# plt.show()
print('my exit code = {}'.format(exit_code))
print('1. count = {}'.format(self.id_sim))
print('2. max(NRMSE) = {}'.format(np.max(NRMSE_val)))
print('3. time = {:.2f} s'.format(sim_time))
# for user information
if do_simulation:
n_err = 1000
Xerr = np.zeros((n_err, x_dim))
U = lhs(x_dim, samples=n_err)
for nx in range(x_dim):
Xerr[:, nx] = U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0]) + self.xrange[nx, 0]
y_pred_var = np.zeros((n_err, y_dim))
y_data_var = np.zeros((n_err, y_dim))
for ny in range(y_dim):
# m_tmp = self.m_list[ny].copy()
m_tmp = self.m_list[ny]
if self.do_logtransform:
#y_var_val = np.var(np.log(Y[:, ny]))
log_mean = np.mean(np.log(Y[:, ny]))
log_var = np.var(np.log(Y[:, ny]))
y_var_val = np.exp(2*log_mean+log_var)*(np.exp(log_var)-1) # in linear space
else:
y_var_val = np.var(Y[:, ny])
for ns in range(n_err):
y_pred_tmp, y_pred_var_tmp = self.__predict(m_tmp,Xerr[ns, :][np.newaxis])
if self.do_logtransform:
y_pred_var[ns, ny] = np.exp(2 * y_pred_tmp + y_pred_var_tmp) * (np.exp(y_pred_var_tmp) - 1)
else:
y_pred_var[ns, ny] = y_pred_var_tmp
y_data_var[ns, ny] = y_var_val
#for parname in m_tmp.parameter_names():
# if ('Mat52' in parname) and parname.endswith('variance'):
# exec('y_pred_prior_var[ns,ny]=m_tmp.' + parname)
#error_ratio1_Pr = (y_pred_var / y_pred_prior_var)
error_ratio2_Pr = (y_pred_var / y_data_var)
#np.max(error_ratio1_Pr, axis=0)
np.max(error_ratio2_Pr, axis=0)
self.perc_thr = np.hstack([np.array([1]), np.arange(10, 1000, 50), np.array([999])])
error_sorted = np.sort(np.max(error_ratio2_Pr, axis=1), axis=0)
self.perc_val = error_sorted[self.perc_thr] # criteria
self.perc_thr = 1 - (self.perc_thr) * 0.001 # ratio=simulation/sampling
corr_val = np.zeros((y_dim,))
R2_val = np.zeros((y_dim,))
for ny in range(y_dim):
if not self.do_mf:
Y_ex = Y[:, ny]
else:
if self.mf_case == 'data-model' or self.mf_case == 'data-data':
Y_ex = self.Y_hf[:, ny]
elif self.mf_case == 'model-data':
Y_ex = Y[:, ny]
corr_val[ny] = np.corrcoef(Y_ex, Y_cv[:, ny])[0, 1]
R2_val[ny] = 1 - np.sum(pow(Y_cv[:, ny] - Y_ex, 2)) / np.sum(pow(Y_cv[:, ny] - np.mean(Y_cv[:, ny]), 2))
if np.var(Y_ex)==0:
corr_val[ny] = 1
R2_val[ny] = 0
self.kernel = kernel
self.NRMSE_val = NRMSE_val
self.corr_val = corr_val
self.R2_val = R2_val
self.Y_loo = Y_cv
self.X = X
self.Y = Y
self.do_sampling = do_sampling
self.do_simulation = do_simulation
self.do_doe = do_doe
self.do_linear = do_linear
self.exit_code = exit_code
self.thr_count = thr_count
self.thr_NRMSE = thr_NRMSE
self.thr_t = thr_t
self.NRMSE_val = NRMSE_val
self.sim_time = sim_time
self.n_samp = n_samp
self.n_sim = self.id_sim
self.y_loo = Y_cv
self.y_exa = Y
self.Y_loo_var = Y_cv_var
self.rvName = []
self.rvDist = []
self.rvVal = []
for nx in range(x_dim):
rvInfo = inp["randomVariables"][nx]
self.rvName = self.rvName + [rvInfo["name"]]
self.rvDist = self.rvDist + [rvInfo["distribution"]]
if do_sampling:
self.rvVal = self.rvVal + [(rvInfo["upperbound"] + rvInfo["lowerbound"]) / 2]
else:
self.rvVal = self.rvVal + [np.mean(X[:, nx])]
def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
warnings.filterwarnings("ignore")
t_opt = time.time()
m_list = list()
for ny in range(self.y_dim):
print("y dimension {}:".format(ny))
nopt = 10
#
# previous optimal
#
nugget_opt_tmp = nugget_opt
if not self.do_mf:
if np.var(m_tmp_list[ny].Y) == 0:
nugget_opt_tmp = "Zero"
for parname in m_tmp_list[ny].parameter_names():
if parname.endswith('variance'):
m_tmp_list[ny][parname].constrain_fixed(0)
m_init = m_tmp_list[ny]
m_tmp = m_init
if nugget_opt_tmp == "Optimize":
m_tmp['Gaussian_noise.variance'].unfix()
elif nugget_opt_tmp == "Fixed Values":
m_tmp['Gaussian_noise.variance'].constrain_fixed(self.nuggetVal[ny])
elif nugget_opt_tmp == "Fixed Bounds":
m_tmp['Gaussian_noise.variance'].constrain_bounded(self.nuggetVal[ny][0], self.nuggetVal[ny][1])
elif nugget_opt_tmp == "Zero":
m_tmp['Gaussian_noise.variance'].constrain_fixed(0)
m_tmp.optimize(clear_after_finish=True)
# m_tmp.optimize_restarts(5)
max_log_likli = m_tmp.log_likelihood()
t_unfix = time.time()
m = m_tmp.copy()
id_opt = 1
print('{} among {} Log-Likelihood: {}'.format(1, nopt, m_tmp.log_likelihood()))
#print(' Calibration time for each: {:.2f} s'.format(time.time() - t_unfix))
if time.time() - t_unfix > self.t_sim_each:
nopt = 1
#
# initial try
#
for parname in m_tmp.parameter_names():
if parname.endswith('lengthscale'):
exec('m_tmp.' + parname + '=self.len')
if nugget_opt_tmp == "Optimize":
m_tmp['Gaussian_noise.variance'].unfix()
elif nugget_opt_tmp == "Fixed Values":
m_tmp['Gaussian_noise.variance'].constrain_fixed(self.nuggetVal[ny])
elif nugget_opt_tmp == "Fixed Bounds":
m_tmp['Gaussian_noise.variance'].constrain_bounded(self.nuggetVal[ny][0], self.nuggetVal[ny][1])
elif nugget_opt_tmp == "Zero":
m_tmp['Gaussian_noise.variance'].constrain_fixed(0)
m_tmp.optimize(clear_after_finish=True)
# m_tmp.optimize_restarts(5)
t_unfix = time.time()
if m_tmp.log_likelihood() > max_log_likli:
max_log_likli = m_tmp.log_likelihood()
m = m_tmp.copy()
id_opt = 1
print('{} among {} Log-Likelihood: {}'.format(2, nopt, m_tmp.log_likelihood()))
#print(' Calibration time for each: {:.2f} s'.format(time.time() - t_unfix))
if time.time() - t_unfix > self.t_sim_each:
nopt = 1
for no in range(nopt - 2):
# m_tmp=m.copy()
# m.randomize()
for parname in m_tmp.parameter_names():
if parname.endswith('lengthscale'):
if math.isnan(m.log_likelihood()):
exec('m_tmp.' + parname + '=np.random.exponential(1, (1, x_dim)) * m_init.' + parname)
else:
exec('m_tmp.' + parname + '=np.random.exponential(1, (1, x_dim)) * m.' + parname)
if nugget_opt_tmp == "Optimize":
m_tmp['Gaussian_noise.variance'].unfix()
elif nugget_opt_tmp == "Fixed Values":
m_tmp['Gaussian_noise.variance'].constrain_fixed(self.nuggetVal[ny])
elif nugget_opt_tmp == "Fixed Bounds":
m_tmp['Gaussian_noise.variance'].constrain_bounded(self.nuggetVal[ny][0], self.nuggetVal[ny][1])
elif nugget_opt_tmp == "Zero":
m_tmp['Gaussian_noise.variance'].constrain_fixed(0)
t_fix = time.time()
try:
m_tmp.optimize()
# m_tmp.optimize_restarts(5)
except Exception as ex:
print("OS error: {0}".format(ex))
print('{} among {} Log-Likelihood: {}'.format(no + 3, nopt, m_tmp.log_likelihood()))
#print(' Calibration time for each: {:.2f} s'.format(time.time() - t_fix))
if m_tmp.log_likelihood() > max_log_likli:
max_log_likli = m_tmp.log_likelihood()
m = m_tmp.copy()
id_opt = no
if time.time() - t_unfix > self.t_sim_each:
nopt = 2 + no
break
if math.isinf(-max_log_likli) or math.isnan(-max_log_likli):
#msg = "Error GP optimization failed, perhaps QoI values are zero."
if np.var(m_tmp.Y) != 0:
msg = "Error GP optimization failed for QoI #{}".format(ny+1)
self.errlog.exit(msg)
m_list = m_list + [m]
print(m)
else:
if nugget_opt_tmp == "Optimize":
m_tmp_list[ny].gpy_model.mixed_noise.Gaussian_noise.unfix()
m_tmp_list[ny].gpy_model.mixed_noise.Gaussian_noise_1.unfix()
elif nugget_opt_tmp == "Fixed Values":
m_tmp_list[ny].gpy_model.mixed_noise.Gaussian_noise.constrain_fixed(self.nuggetVal[ny])
m_tmp_list[ny].gpy_model.mixed_noise.Gaussian_noise_1.constrain_fixed(self.nuggetVal[ny])
elif nugget_opt_tmp == "Fixed Bounds":
m_tmp_list[ny].gpy_model.mixed_noise.Gaussian_noise.constrain_bounded(self.nuggetVal[ny][0], self.nuggetVal[ny][1])
m_tmp_list[ny].gpy_model.mixed_noise.Gaussian_noise_1.constrain_bounded(self.nuggetVal[ny][0], self.nuggetVal[ny][1])
elif nugget_opt_tmp == "Zero":
m_tmp_list[ny].gpy_model.mixed_noise.Gaussian_noise.constrain_fixed(0)
m_tmp_list[ny].gpy_model.mixed_noise.Gaussian_noise_1.constrain_fixed(0)
#
# if not do_nugget:
# m_tmp_list[ny].gpy_model.mixed_noise.Gaussian_noise.fix(0)
# m_tmp_list[ny].gpy_model.mixed_noise.Gaussian_noise_1.fix(0)
m_tmp_list[ny].optimize()
nopt = 5
id_opt = 0
self.calib_time = (time.time() - t_opt) * round(10 / nopt)
print(' Calibration time: {:.2f} s, id_opt={}'.format(self.calib_time, id_opt))
return m_tmp_list
def __design_of_experiments(self, X, Y, ac, ar, n_candi, n_integ, pre_m_list, do_cal, nugget_opt, do_doe):
# do log transform
if self.do_logtransform:
if np.min(Y)<0:
msg = 'Error running SimCenterUQ. Response contains negative values. Please uncheck the log-transform option in the UQ tab'
errlog.exit(msg)
Y = np.log(Y)
if self.do_mf:
if self.mf_case == 'data-model' or self.mf_case == 'data-data':
if np.min(self.Y_hf)<0:
msg = 'Error running SimCenterUQ. Response contains negative values. Please uncheck the log-transform option in the UQ tab'
errlog.exit(msg)
self.Y_hf = np.log(self.Y_hf)
                elif self.mf_case == 'model-data':
if np.min(self.Y_lf) < 0:
msg = 'Error running SimCenterUQ. Response contains negative values. Please uncheck the log-transform option in the UQ tab'
errlog.exit(msg)
self.Y_lf = np.log(self.Y_lf)
r = 1 # adaptively
y_dim = Y.shape[1]
x_dim = X.shape[1]
m_tmp_list = pre_m_list
for i in range(y_dim):
if not self.do_mf:
m_tmp_list[i].set_XY(X, Y[:, i][np.newaxis].transpose())
else:
if self.mf_case == 'data-model' or self.mf_case == 'data-data':
X_list_tmp, Y_list_tmp = emf.convert_lists_to_array.convert_xy_lists_to_arrays([X, self.X_hf],
[Y[:, i][np.newaxis].transpose(), self.Y_hf[:, i][np.newaxis].transpose()])
elif self.mf_case == 'model-data':
X_list_tmp, Y_list_tmp = emf.convert_lists_to_array.convert_xy_lists_to_arrays([self.X_lf, X],
[self.Y_lf[:, i][np.newaxis].transpose(),Y[:, i][np.newaxis].transpose()])
m_tmp_list[i].set_data(X=X_list_tmp,Y=Y_list_tmp)
if do_cal:
m_list = self.__parameter_calibration(m_tmp_list, x_dim, nugget_opt)
else:
m_list = m_tmp_list.copy()
#
# cross validation errors
#
Y_pred, Y_pred_var, e2 = self.__get_cross_validation(X,Y,m_list)
if self.do_logtransform:
mu = Y_pred
sig2 = Y_pred_var
median = np.exp(mu)
mean = np.exp(mu + sig2/2)
var = np.exp(2*mu + sig2)*(np.exp(sig2)-1)
Y_pred = median
Y_pred_var = var
if self.do_mf:
if self.mf_case == 'data-model' or self.mf_case == 'data-data':
self.Y_hf = np.exp(self.Y_hf)
elif self.mf_case == 'model-data':
self.Y_lf = np.exp(self.Y_lf)
if not do_doe:
return 0, m_list, 0, 0, Y_pred, Y_pred_var
#
# candidates of DoE
#
y_var = np.var(Y, axis=0) # normalization
y_idx = np.argmax(np.sum(e2 / y_var, axis=0)) # dimension of interest
m_tmp_list = m_list.copy()
m_idx = m_tmp_list[y_idx]
#
# SCREENING score_tmp function of each candidate
#
nc1 = round(n_candi)
self.doe_method = self.doe_method.lower()
if self.doe_method == "pareto":
#
# Initial candidates
#
xc1 = np.zeros((nc1, x_dim))
for nx in range(x_dim):
xc1[:, nx] = np.random.uniform(self.xrange[nx, 0], self.xrange[nx, 1], (1, nc1)) # LHS
nq = round(n_integ)
xq = np.zeros((nq, x_dim))
for nx in range(x_dim):
xq[:, nx] = np.random.uniform(self.xrange[nx, 0], self.xrange[nx, 1], (1, nq))
#
# Lets Do Pareto
#
yc1_pred, yc1_var = self.__predict(m_idx, xc1) # use only variance
score1 = np.zeros(yc1_pred.shape)
cri1 = np.zeros(yc1_pred.shape)
cri2 = np.zeros(yc1_pred.shape)
# TODO: is this the best?
ll = self.xrange[:, 1] - self.xrange[:, 0]
for i in range(nc1):
if not self.do_mf:
wei = self.weights_node2(xc1[i, :], X, ll)
#phi = e2[closest_node(xc1[i, :], X, ll)]
#phi = e2[self.__closest_node(xc1[i, :], X)]
else:
if self.mf_case == 'data-model' or self.mf_case == 'data-data':
wei = self.weights_node2(xc1[i, :], self.X_hf, ll)
#phi = e2[closest_node(xc1[i, :], self.X_hf, ll)]
#phi = e2[self.__closest_node(xc1[i, :], self.X_hf)]
elif self.mf_case == 'model-data':
wei = self.weights_node2(xc1[i, :], X, ll)
#phi = e2[closest_node(xc1[i, :], X, ll)]
#phi = e2[self.__closest_node(xc1[i, :], X)]
#cri1[i] = yc1_var[i]
cri2[i] = sum(e2[:, y_idx] / Y_pred_var[:, y_idx] * wei.T)
#cri2[i] = pow(phi[y_idx],r)
VOI = np.zeros(yc1_pred.shape)
for i in range(nc1):
pdfvals = m_idx.kern.K(np.array([xq[i]]), xq)**2/m_idx.kern.K(np.array([xq[0]]))**2
VOI[i] = np.mean(pdfvals)*np.prod(np.diff(self.xrange,axis=1)) # * np.prod(np.diff(self.xrange))
cri1[i] = yc1_var[i] * VOI[i]
cri1 = (cri1-np.min(cri1))/(np.max(cri1)-np.min(cri1))
cri2 = (cri2-np.min(cri2))/(np.max(cri2)-np.min(cri2))
logcrimi1 = np.log(cri1[:, 0])
logcrimi2 = np.log(cri2[:, 0])
idx_pareto_front = list()
rankid = np.zeros(nc1)
varRank = np.zeros(nc1)
biasRank = np.zeros(nc1)
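            # Pareto-ranking note: rankid[id] counts the candidates that are at
            # least as large as candidate id on both (log) criteria; rank-1
            # candidates therefore form the Pareto front used below.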
for id in range(nc1):
idx_tmp = np.argwhere((logcrimi1 >= logcrimi1[id]) * (logcrimi2 >= logcrimi2[id]))
varRank[id] = np.sum((logcrimi1 >= logcrimi1[id]))
biasRank[id] = np.sum((logcrimi2 >= logcrimi2[id]))
rankid[id] = idx_tmp.size
idx_rank = np.argsort(rankid)
sort_rank = np.sort(rankid)
num_1rank = np.sum(rankid==1)
idx_1rank = list((np.argwhere(rankid==1)).flatten())
npareto = 4
if num_1rank < self.cal_interval:
prob = np.ones((nc1,))
prob[list(rankid==1)]=0
prob=prob/sum(prob)
idx_pareto = idx_1rank + list(np.random.choice(nc1, self.cal_interval-num_1rank, p=prob))
else:
idx_pareto_candi = idx_1rank.copy()
X_tmp = X
Y_tmp = Y[:,y_idx][np.newaxis].T
m_tmp = m_idx.copy()
# get MMSEw
score = np.squeeze(cri1*cri2)
score_candi = score[idx_pareto_candi]
best_local = np.argsort(-score_candi)[0]
best_global = idx_1rank[best_local]
idx_pareto_new = [best_global]
del idx_pareto_candi[best_local]
for i in range(self.cal_interval-1):
X_tmp = np.vstack([X_tmp, xc1[best_global, :][np.newaxis]])
Y_tmp = np.vstack([Y_tmp, np.array([[0]]) ]) # any variables
m_tmp.set_XY(X=X_tmp, Y=Y_tmp)
dummy, Yq_var = m_tmp.predict(xc1[idx_pareto_candi, :])
cri1 = Yq_var * VOI[idx_pareto_candi]
cri1 = (cri1 - np.min(cri1)) / (np.max(cri1) - np.min(cri1))
score_tmp = cri1 * cri2[idx_pareto_candi] # only update the variance
best_local = np.argsort(-np.squeeze(score_tmp))[0]
best_global = idx_pareto_candi[best_local]
idx_pareto_new = idx_pareto_new + [best_global]
del idx_pareto_candi[best_local]
#score_tmp = Yq_var * cri2[idx_pareto_left]/Y_pred_var[closest_node(xc1[i, :], X, self.m_list, self.xrange)]
#idx_pareto = list(idx_rank[0:self.cal_interval])
idx_pareto = idx_pareto_new
update_point = xc1[idx_pareto, :]
update_IMSE = 0
# import matplotlib.pyplot as plt
# plt.plot(logcrimi1, logcrimi2, 'x');plt.plot(logcrimi1[idx_pareto], logcrimi2[idx_pareto], 'x'); plt.show()
# plt.plot(m_idx.X[:,0], m_idx.X[:,1], 'x'); plt.show()
# plt.plot(X[:, 0],X[:, 1], 'ro');
# plt.scatter(xc1[:,0], xc1[:,1], c=cri2); plt.plot(xc1[rankid==0,0], xc1[rankid==0,1], 'rx'); plt.show()
# plt.scatter(xc1[:,0], xc1[:,1], c=cri2); plt.plot(update_point[:,0], update_point[:,1], 'rx'); plt.show()
# plt.scatter(xc1[:, 0], xc1[:, 1], c=cri2); plt.show()
#
'''
idx_pareto = list()
for id in range(nc1):
idx_tmp = np.argwhere(logcrimi2 >= logcrimi2[id])
if np.sum(logcrimi1[idx_tmp[:, 0]] >= logcrimi1[id]) == 1:
idx_pareto = idx_pareto + [id]
if len(idx_pareto) == 0:
idx_pareto = np.arange(self.cal_interval)
if len(idx_pareto) > self.cal_interval:
random_indices = random.sample(range(len(idx_pareto)), self.cal_interval) # get 2 random indices
idx_pareto2 = np.asarray(random_indices)
idx_pareto = np.asarray(idx_pareto)
idx_pareto = list(idx_pareto[idx_pareto2[0:self.cal_interval]])
'''
elif self.doe_method == "imsew":
nq = round(n_integ)
m_stack = m_idx.copy()
X_stack = X
Y_stack = Y
update_point = np.zeros((self.cal_interval,self.x_dim))
update_IMSE = np.zeros((self.cal_interval,1))
#
# Initial candidates
#
for ni in range(self.cal_interval):
#
# Initial candidates
#
xc1 = np.zeros((nc1, x_dim))
for nx in range(x_dim):
xc1[:, nx] = np.random.uniform(self.xrange[nx, 0], self.xrange[nx, 1], (1, nc1)) # LHS
xq = np.zeros((nq, x_dim))
for nx in range(x_dim):
xq[:, nx] = np.random.uniform(self.xrange[nx, 0], self.xrange[nx, 1], (1, nq))
#TODO: is diff(xrange) the best?
ll = self.xrange[:, 1] - self.xrange[:, 0]
phiq = np.zeros((nq, y_dim))
for i in range(nq):
phiq[i,:] = e2[closest_node(xq[i, :], X, ll)]
phiqr = pow(phiq[:, y_idx], r)
if self.do_parallel:
tmp = time.time()
iterables = ((m_stack.copy(), xc1[i,:][np.newaxis], xq, phiqr, i) for i in range(nc1))
result_objs = list(self.pool.starmap(imse, iterables))
IMSEc1 = np.zeros(nc1)
for IMSE_val, idx in result_objs:
IMSEc1[idx] = IMSE_val
print("IMSE: finding the next DOE {} in a parallel way.. time = {}".format(ni,time.time() -tmp)) # 7s # 3-4s
else:
tmp = time.time()
phiqr = pow(phiq[:, y_idx], r)
IMSEc1 = np.zeros(nc1)
for i in range(nc1):
IMSEc1[i], dummy = imse(m_stack.copy(), xc1[i,:][np.newaxis], xq, phiqr, i)
print("IMSE: finding the next DOE {} in a serial way.. time = {}".format(ni,time.time() -tmp)) # 4s
new_idx = np.argmin(IMSEc1, axis=0)
x_point = xc1[new_idx, :][np.newaxis]
X_stack = np.vstack([X_stack, x_point])
Y_stack = np.zeros((Y_stack.shape[0] + 1, Y.shape[1])) # any variables
m_stack.set_XY(X=X_stack, Y=Y_stack)
update_point[ni, :] = x_point
update_IMSE[ni, :] = IMSEc1[new_idx]
# import matplotlib.pyplot as plt; plt.scatter(xc1[:,0],xc1[:,1],c = IMSEc1); plt.show()
# import matplotlib.pyplot as plt; plt.scatter(xc1[:,0],xc1[:,1],c = IMSEc1); plt.plot(update_point[:,0],update_point[:,1],'x'); plt.show()
# import matplotlib.pyplot as plt; plt.scatter(X_stack[:,0],X_stack[:,1]); plt.show()
'''
nc1 = round(n_candi)
xc1 = np.zeros((nc1, x_dim))
for nx in range(x_dim):
xc1[:, nx] = np.random.uniform(self.xrange[nx, 0], self.xrange[nx, 1], (1, nc1)) # LHS
yc1_pred, yc1_var = self.__predict(m_idx, xc1) # use only variance
score1 = np.zeros(yc1_pred.shape)
cri1 = np.zeros(yc1_pred.shape)
cri2 = np.zeros(yc1_pred.shape)
for i in range(nc1):
if not self.do_mf:
phi = e2[self.__closest_node(xc1[i, :], X)]
else:
phi = e2[self.__closest_node(xc1[i, :], self.X_hf)]
score1[i] = yc1_var[i] * pow(phi[y_idx], r)
cri1[i] = yc1_var[i]
cri2[i] = pow(phi[y_idx], r)
sort_idx_score1 = np.argsort(-score1.T) # (-) sign to make it descending order
nc2 = round(nc1 * ac)
xc2 = xc1[sort_idx_score1[0, 0:nc2], :]
score2 = score1[sort_idx_score1[0, 0:nc2]]
nc3 = round(nc2 * ar)
if ar != 1:
xc2_norm = np.zeros((nc2, x_dim))
for nx in range(x_dim):
xc2_norm[:, nx] = (xc2[:, nx] - self.xrange[nx, 0]) / (
self.xrange[nx, 1] - self.xrange[nx, 0]) # additional weights?
# n_clusters =1
km_model = KMeans(n_clusters=max(1, nc3))
km_model.fit(xc2_norm)
idx_cluster = km_model.predict(xc2_norm)
global_idx_cluster = np.zeros((nc3, 1), dtype=np.int64)
for i in range(nc3):
ith_cluster_comps = np.where(idx_cluster == i)[0]
idx = np.argsort(-score2[ith_cluster_comps].T)[0][0]
global_idx_cluster[i, 0] = ith_cluster_comps[idx]
xc3 = xc2[global_idx_cluster.T, :][0]
score3 = score2[global_idx_cluster.T][0]
else:
xc3 = xc2
score3 = score2
#
# get IMSE
#
nq = round(n_integ)
xq = np.zeros((nq, x_dim))
for nx in range(x_dim):
xq[:, nx] = np.random.uniform(self.xrange[nx, 0], self.xrange[nx, 1], (1, nq))
phi = np.zeros((nq, y_dim))
for i in range(nq):
phi[i, :] = e2[self.__closest_node(xq[i, :], X)]
IMSE = np.zeros((nc3,))
m_tmp = m_idx.copy()
for i in range(nc3):
X_tmp = np.vstack([X, xc3[i, :][np.newaxis]])
Y_tmp = np.zeros((Y.shape[0] + 1, Y.shape[1])) # any variables
m_tmp.set_XY(X=X_tmp, Y=Y_tmp)
dummy, Yq_var = m_tmp.predict(xq)
IMSE[i] = 1 / nq * sum(pow(phi[:, y_idx], r) * Yq_var.T[0])
new_idx = np.argmin(IMSE, axis=0)
print(np.min(IMSE))
update_point = xc3[new_idx, :][np.newaxis]
update_IMSE = IMSE[new_idx]
'''
elif self.doe_method == "random":
update_point = xc1[0:self.cal_interval, :]
update_IMSE = 0
elif self.doe_method == "mmse":
sort_idx_score1 = np.argsort(-cri1.T) # (-) sign to make it descending order
nc2 = round(nc1 * ac)
xc2 = xc1[sort_idx_score1[0, 0:nc2], :]
update_point = xc2[0:1, :]
update_IMSE = 0
elif self.doe_method == "mmsew":
#
# Initial candidates
#
xc1 = np.zeros((nc1, x_dim))
for nx in range(x_dim):
xc1[:, nx] = np.random.uniform(self.xrange[nx, 0], self.xrange[nx, 1], (1, nc1)) # LHS
m_stack = m_idx.copy()
ll = self.xrange[:, 1] - self.xrange[:, 0]
phic = np.zeros((nc1, y_dim))
for i in range(nc1):
phic[i, :] = e2[closest_node(xc1[i, :], X, ll)]
phicr = pow(phic[:, y_idx], r)
X_stack = X
Y_stack = Y
update_point = np.zeros((self.cal_interval,self.x_dim))
update_IMSE = np.zeros((self.cal_interval,1))
for ni in range(self.cal_interval):
yc1_pred, yc1_var = m_stack.predict(xc1) # use only variance
MMSEc1 = yc1_var.flatten() * phicr.flatten()
new_idx = np.argmax(MMSEc1, axis=0)
x_point = xc1[new_idx, :][np.newaxis]
X_stack = np.vstack([X_stack, x_point])
Y_stack = np.zeros((Y_stack.shape[0] + 1, Y.shape[1])) # any variables
m_stack.set_XY(X=X_stack, Y=Y_stack)
update_point[ni, :] = x_point
update_IMSE[ni, :] = MMSEc1[new_idx]
else:
msg = 'Error running SimCenterUQ: cannot identify the doe method <' + self.doe_method + '>'
errlog.exit(msg)
return update_point, m_list, update_IMSE, y_idx, Y_pred, Y_pred_var
def __normalized_mean_sq_error(self, yp, ye):
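        # NRMSE per output dimension: RMSE across samples, normalized by each
        # column's data range (max - min); constant columns are set to 0 below.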
nt = yp.shape[0]
data_bound = (np.max(ye, axis=0) - np.min(ye, axis=0))
RMSE = np.sqrt(1 / nt * np.sum(pow(yp - ye, 2), axis=0))
        NRMSE = RMSE / data_bound
        NRMSE[np.argwhere(data_bound == 0)] = 0
return NRMSE
def __closest_node(self, node, nodes):
nodes = np.asarray(nodes)
deltas = nodes - node
deltas_norm = np.zeros(deltas.shape)
for nx in range(self.x_dim):
deltas_norm[:, nx] = (deltas[:, nx]) / (self.xrange[nx, 1] - self.xrange[nx, 0]) # additional weights?
# np.argmin(np.sum(pow(deltas_norm,2),axis=1))
dist_2 = np.einsum('ij,ij->i', deltas_norm, deltas_norm)
return np.argmin(dist_2)
def __from_XY_into_list(self, X, Y):
x_list = list()
y_list = list()
for i in range(Y.shape[1]):
x_list = x_list + [X, ]
y_list = y_list + [Y[:, [i, ]], ]
return x_list, y_list
def __predict(self, m, X):
if not self.do_mf:
return m.predict(X)
else:
if self.mf_case == 'data-model' or self.mf_case == 'data-data':
X_list = convert_x_list_to_array([X, X])
X_list_l = X_list[:X.shape[0]]
X_list_h = X_list[X.shape[0]:]
return m.predict(X_list_h)
elif self.mf_case == 'model-data':
#return m.predict(X)
X_list = convert_x_list_to_array([X, X])
X_list_l = X_list[:X.shape[0]]
X_list_h = X_list[X.shape[0]:]
return m.predict(X_list_h)
def __get_cross_validation(self,X,Y,m_list):
if not self.do_mf:
e2 = np.zeros(Y.shape)
Y_pred = np.zeros(Y.shape)
Y_pred_var = np.zeros(Y.shape)
for ny in range(Y.shape[1]):
m_tmp = m_list[ny].copy()
for ns in range(X.shape[0]):
X_tmp = np.delete(X, ns, axis=0)
Y_tmp = np.delete(Y, ns, axis=0)
m_tmp.set_XY(X=X_tmp, Y=Y_tmp[:, ny][np.newaxis].transpose())
x_loo = X[ns, :][np.newaxis]
# Y_pred_tmp, Y_err_tmp = m_tmp.predict(x_loo)
Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp,x_loo)
Y_pred[ns, ny] = Y_pred_tmp
Y_pred_var[ns, ny] = Y_err_tmp
e2[ns, ny] = pow((Y_pred[ns, ny] - Y[ns, ny]), 2) # for nD outputs
else:
if self.mf_case == 'data-model' or self.mf_case == 'data-data':
e2 = np.zeros(self.Y_hf.shape)
Y_pred = np.zeros(self.Y_hf.shape)
Y_pred_var = np.zeros(self.Y_hf.shape)
for ny in range(Y.shape[1]):
m_tmp = deepcopy(m_list[ny])
for ns in range(self.X_hf.shape[0]):
X_hf_tmp = np.delete(self.X_hf, ns, axis=0)
Y_hf_tmp = np.delete(self.Y_hf, ns, axis=0)
X_list_tmp, Y_list_tmp = emf.convert_lists_to_array.convert_xy_lists_to_arrays([X, X_hf_tmp],
[Y[:, ny][np.newaxis].transpose(), Y_hf_tmp[:, ny][np.newaxis].transpose()])
m_tmp.set_data(X=X_list_tmp, Y=Y_list_tmp)
x_loo = self.X_hf[ns][np.newaxis]
Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp,x_loo)
Y_pred[ns,ny] = Y_pred_tmp
Y_pred_var[ns,ny] = Y_err_tmp
e2[ns,ny] = pow((Y_pred[ns,ny] - self.Y_hf[ns,ny]), 2) # for nD outputs
elif self.mf_case == 'model-data':
e2 = np.zeros(Y.shape)
Y_pred = np.zeros(Y.shape)
Y_pred_var = np.zeros(Y.shape)
for ny in range(Y.shape[1]):
m_tmp = deepcopy(m_list[ny])
for ns in range(X.shape[0]):
X_tmp = np.delete(X, ns, axis=0)
Y_tmp = np.delete(Y, ns, axis=0)
X_list_tmp, Y_list_tmp = emf.convert_lists_to_array.convert_xy_lists_to_arrays([self.X_lf, X_tmp],
[self.Y_lf[:, ny][np.newaxis].transpose(), Y_tmp[:, ny][np.newaxis].transpose()])
m_tmp.set_data(X=X_list_tmp, Y=Y_list_tmp)
#x_loo = np.hstack((X[ns], 1))[np.newaxis]
x_loo = self.X_hf[ns][np.newaxis]
Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp,x_loo)
Y_pred[ns,ny] = Y_pred_tmp
Y_pred_var[ns,ny] = Y_err_tmp
e2[ns,ny] = pow((Y_pred[ns,ny] - Y[ns,ny]), 2) # for nD outputs
return Y_pred, Y_pred_var, e2
def term(self):
if self.do_parallel:
if self.run_type != "runningLocal":
print("RUNNING SUCCESSFUL")
self.world.Abort(0) # to prevent deadlock
def save_model(self, filename):
import json
with open(self.work_dir + '/' + filename + '.pkl', 'wb') as file:
pickle.dump(self.m_list, file)
# json.dump(self.m_list, file)
header_string_x = ' ' + ' '.join([str(elem) for elem in self.rv_name]) + ' '
header_string_y = ' ' + ' '.join([str(elem) for elem in self.g_name])
header_string = header_string_x + header_string_y
if not self.do_mf:
xy_data = np.concatenate((np.asmatrix(np.arange(1, self.X.shape[0] + 1)).T, self.X, self.Y), axis=1)
else:
if not self.hf_is_model:
xy_data = np.concatenate((np.asmatrix(np.arange(1, self.X_hf.shape[0] + 1)).T, self.X_hf, self.Y_hf), axis=1)
else:
xy_data = np.concatenate((np.asmatrix(np.arange(1, self.X.shape[0] + 1)).T, self.X, self.Y), axis=1)
np.savetxt(self.work_dir + '/dakotaTab.out', xy_data, header=header_string, fmt='%1.4e', comments='%')
        np.savetxt(self.work_dir + '/inputTab.out', self.X, header=header_string_x, fmt='%1.4e', comments='%')
#!/usr/bin/env python
# Copyright 2018 <EMAIL>
# MIT Licence
from __future__ import print_function
import torch
import numpy as np
import pdb
try:
xrange
except:
xrange = range # python3
class AnchorGenerator:
def __init__(self, num_anchors_per_frame=20, min_box_size=30, max_box_size=220, max_utt_length=1500, device="cuda"):
self.num_anchors_per_frame = num_anchors_per_frame
self.max_box_size = max_box_size
self.min_box_size = min_box_size
self.max_utt_length = max_utt_length
x = self._generate_basic_anchors(num_anchors_per_frame, min_box_size, max_box_size)
self.basic_anchors = torch.from_numpy(x).float()
self._update(max_utt_length, device)
def _generate_basic_anchors(self, num_anchors_per_frame, min_window_size, max_window_size):
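        # Each anchor is a [start, end] offset pair relative to the current
        # frame: it spans [-window_size, 0], with window sizes spaced linearly
        # between min_window_size and max_window_size.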
shift = (max_window_size-min_window_size)/1.0/(num_anchors_per_frame-1)
start_indexes = np.arange(min_window_size, max_window_size+1, shift)
basic_anchors = np.zeros([num_anchors_per_frame, 2])
basic_anchors[:,0]=-start_indexes
basic_anchors[:,1]=0
return basic_anchors
def _generate_log_anchors(self, num_anchors_per_frame, min_window_size, max_window_size):
log_min = np.log(min_window_size)
log_max = np.log(max_window_size)
log_shift = (log_max-log_min)/1.0/(num_anchors_per_frame-1)
log_start_indexes = np.arange(log_min, log_max+log_shift, log_shift)
start_indexes = np.exp(log_start_indexes)
        basic_anchors = np.zeros([num_anchors_per_frame, 2])
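        # Assumed completion (the source is truncated here), mirroring
        # _generate_basic_anchors above; arange on floats can overshoot by one
        # element, hence the slice.
        basic_anchors[:, 0] = -start_indexes[:num_anchors_per_frame]
        basic_anchors[:, 1] = 0
        return basic_anchors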
"""
Lean rigid transformation class
Author: Jeff
"""
import logging
import os
import numpy as np
import scipy.linalg
from . import utils
from . import transformations
from .points import BagOfPoints, BagOfVectors, Point, PointCloud, Direction, NormalCloud
from .dual_quaternion import DualQuaternion
try:
from geometry_msgs import msg
except:
logging.warning('Failed to import geometry msgs in rigid_transformations.py.')
try:
import rospy
import rosservice
except ImportError:
logging.warning("Failed to import ros dependencies in rigid_transforms.py")
try:
from autolab_core.srv import *
except ImportError:
logging.warning("autolab_core not installed as catkin package, RigidTransform ros methods will be unavailable")
import subprocess
TF_EXTENSION = '.tf'
STF_EXTENSION = '.stf'
class RigidTransform(object):
"""A Rigid Transformation from one frame to another.
"""
def __init__(self, rotation=np.eye(3), translation=np.zeros(3),
from_frame='unassigned', to_frame='world'):
"""Initialize a RigidTransform.
Parameters
----------
rotation : :obj:`numpy.ndarray` of float
A 3x3 rotation matrix (should be unitary).
translation : :obj:`numpy.ndarray` of float
A 3-entry translation vector.
from_frame : :obj:`str`
A name for the frame of reference on which this transform
operates. This and to_frame are used for checking compositions
of RigidTransforms, which is useful for debugging and catching
errors.
to_frame : :obj:`str`
A name for the frame of reference to which this transform
moves objects.
Raises
------
ValueError
If any of the arguments are invalid. The frames must be strings or
unicode, the translations and rotations must be ndarrays, have the
correct shape, and the determinant of the rotation matrix should be
1.0.
"""
if not isinstance(from_frame, str) and not isinstance(from_frame, unicode):
raise ValueError('Must provide string name of input frame of data')
if not isinstance(to_frame, str) and not isinstance(to_frame, unicode):
raise ValueError('Must provide string name of output frame of data')
self.rotation = rotation
self.translation = translation
self._from_frame = str(from_frame)
self._to_frame = str(to_frame)
def copy(self):
"""Returns a copy of the RigidTransform.
Returns
-------
:obj:`RigidTransform`
A deep copy of the RigidTransform.
"""
return RigidTransform(np.copy(self.rotation), np.copy(self.translation), self.from_frame, self.to_frame)
def _check_valid_rotation(self, rotation):
"""Checks that the given rotation matrix is valid.
"""
if not isinstance(rotation, np.ndarray) or not np.issubdtype(rotation.dtype, np.number):
raise ValueError('Rotation must be specified as numeric numpy array')
if len(rotation.shape) != 2 or rotation.shape[0] != 3 or rotation.shape[1] != 3:
raise ValueError('Rotation must be specified as a 3x3 ndarray')
if np.abs(np.linalg.det(rotation) - 1.0) > 1e-3:
raise ValueError('Illegal rotation. Must have determinant == 1.0')
def _check_valid_translation(self, translation):
"""Checks that the translation vector is valid.
"""
if not isinstance(translation, np.ndarray) or not np.issubdtype(translation.dtype, np.number):
raise ValueError('Translation must be specified as numeric numpy array')
t = translation.squeeze()
if len(t.shape) != 1 or t.shape[0] != 3:
raise ValueError('Translation must be specified as a 3-vector, 3x1 ndarray, or 1x3 ndarray')
@property
def rotation(self):
""":obj:`numpy.ndarray` of float: A 3x3 rotation matrix.
"""
return self._rotation
@rotation.setter
def rotation(self, rotation):
# Convert quaternions
if len(rotation) == 4:
q = np.array([q for q in rotation])
if np.abs(np.linalg.norm(q) - 1.0) > 1e-3:
raise ValueError('Invalid quaternion. Must be norm 1.0')
rotation = RigidTransform.rotation_from_quaternion(q)
# Convert lists and tuples
if type(rotation) in (list, tuple):
rotation = np.array(rotation).astype(np.float32)
self._check_valid_rotation(rotation)
self._rotation = rotation * 1.
@property
def translation(self):
""":obj:`numpy.ndarray` of float: A 3-ndarray that represents the
transform's translation vector.
"""
return self._translation
@translation.setter
def translation(self, translation):
# Convert lists to translation arrays
if type(translation) in (list, tuple) and len(translation) == 3:
translation = np.array([t for t in translation]).astype(np.float32)
self._check_valid_translation(translation)
self._translation = translation.squeeze() * 1.
@property
def position(self):
""":obj:`numpy.ndarray` of float: A 3-ndarray that represents the
transform's translation vector (same as translation).
"""
return self._translation
@position.setter
def position(self, position):
self.translation = position
@property
def adjoint_tf(self):
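        """:obj:`numpy.ndarray` of float: The 6x6 adjoint of the transform,
        in block form [[R, 0], [skew(t) @ R, R]], mapping twists between
        the two frames.
        """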
A = np.zeros([6,6])
A[:3,:3] = self.rotation
A[3:,:3] = utils.skew(self.translation).dot(self.rotation)
A[3:,3:] = self.rotation
return A
@property
def from_frame(self):
""":obj:`str`: The identifier for the 'from' frame of reference.
"""
return self._from_frame
@from_frame.setter
def from_frame(self, from_frame):
self._from_frame = str(from_frame)
@property
def to_frame(self):
""":obj:`str`: The identifier for the 'to' frame of reference.
"""
return self._to_frame
@to_frame.setter
def to_frame(self, to_frame):
self._to_frame = str(to_frame)
@property
def euler_angles(self):
""":obj:`tuple` of float: The three euler angles for the rotation.
"""
q_wxyz = self.quaternion
q_xyzw = np.roll(q_wxyz, -1)
return transformations.euler_from_quaternion(q_xyzw)
@property
def quaternion(self):
""":obj:`numpy.ndarray` of float: A quaternion vector in wxyz layout.
"""
q_xyzw = transformations.quaternion_from_matrix(self.matrix)
q_wxyz = np.roll(q_xyzw, 1)
return q_wxyz
@property
def dual_quaternion(self):
""":obj:`DualQuaternion`: The DualQuaternion corresponding to this
transform.
"""
qr = self.quaternion
qd = np.append([0], self.translation / 2.)
return DualQuaternion(qr, qd)
@property
def axis_angle(self):
""":obj:`numpy.ndarray` of float: The axis-angle representation for the rotation.
"""
qw, qx, qy, qz = self.quaternion
        theta = 2 * np.arccos(qw)
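        # Assumed remainder (the source is truncated here); a sketch of the
        # standard quaternion -> axis-angle conversion: recover the unit
        # rotation axis and scale it by the angle theta.
        omega = np.array([qx, qy, qz])
        norm = np.linalg.norm(omega)
        if norm > 0:
            omega = omega / norm
        return theta * omega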
import os
import sys
import time
import pdb
import gc
import numpy as np
import faiss
import argparse
import resource
import benchmark.datasets
from benchmark.datasets import DATASETS
from benchmark.plotting import eval_range_search
####################################################################
# Index building functions
####################################################################
def two_level_clustering(xt, nc1, nc2, clustering_niter=25, spherical=False):
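    # Hierarchical k-means: nc1 coarse centroids are trained first, then nc2
    # sub-centroids inside each coarse cell, yielding nc1 * nc2 centroids at a
    # fraction of the cost of flat k-means over the full training set.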
d = xt.shape[1]
print(f"2-level clustering of {xt.shape} nb clusters = {nc1}*{nc2} = {nc1*nc2}")
print("perform coarse training")
km = faiss.Kmeans(
d, nc1, verbose=True, niter=clustering_niter,
max_points_per_centroid=2000,
spherical=spherical
)
km.train(xt)
print()
# coarse centroids
centroids1 = km.centroids
print("assigning the training set")
t0 = time.time()
_, assign1 = km.assign(xt)
bc = np.bincount(assign1, minlength=nc1)
print(f"done in {time.time() - t0:.2f} s. Sizes of clusters {min(bc)}-{max(bc)}")
o = assign1.argsort()
del km
# train sub-clusters
i0 = 0
c2 = []
t0 = time.time()
for c1 in range(nc1):
print(f"[{time.time() - t0:.2f} s] training sub-cluster {c1}/{nc1}\r", end="", flush=True)
i1 = i0 + bc[c1]
subset = o[i0:i1]
        assert np.all(assign1[subset] == c1)
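        # Assumed continuation (the source is truncated here): train nc2
        # sub-centroids on this coarse cell's points and collect them.
        km = faiss.Kmeans(d, nc2, niter=clustering_niter, spherical=spherical)
        km.train(xt[subset])
        c2.append(km.centroids)
        i0 = i1
    print()
    return np.vstack(c2)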
from junctiontree import computation as comp
import numpy as np
from .util import assert_potentials_equal
def get_arrays_and_vars(tree, node_list, potentials):
"""Get all arrays and their variables as a flat list
Output: [array1, vars1, ..., arrayN, varsN]
"""
return list([potentials[tree[0]],node_list[tree[0]]]) + sum(
[
get_arrays_and_vars(child_tree, node_list, potentials)
for child_tree in tree[1:]
],
[]
)
def brute_force_sum_product(tree, node_list, potentials):
"""Compute brute force sum-product with einsum """
# Function to compute the sum-product with brute force einsum
arrays_vars = get_arrays_and_vars(tree, node_list, potentials)
f = lambda output_vars: np.einsum(*(arrays_vars + [output_vars]))
def __run(tree, node_list, p, f, res=[]):
res.append(f(node_list[tree[0]]))
for child_tree in tree[1:]:
__run(child_tree, node_list, p, f, res)
return res
return __run(tree, node_list, potentials, f)
def assert_sum_product(tree, node_order, potentials, variables):
""" Test shafer-shenoy vs brute force sum-product """
# node_order represents the order nodes are traversed
# in get_arrays_and_vars function
assert_potentials_equal(
brute_force_sum_product(
tree,
[variables[idx] for idx in node_order],
[potentials[idx] for idx in node_order]
),
comp.compute_beliefs(tree, potentials, variables)
)
def test_one_scalar_node():
assert_sum_product(
[
0,
],
[0],
[
np.random.randn(),
],
[[]] # no variables for scalar
)
def test_one_matrix_node():
assert_sum_product(
[
0,
],
[0],
[
np.random.randn(2, 3),
],
[
[3,5]
]
)
def test_one_child_node_with_all_variables_shared():
assert_sum_product(
[
0,
(
2,
[
1,
]
)
],
[0,2,1],
[
np.random.randn(2, 3),
np.random.randn(3, 2),
np.ones((3, 2)),
],
[
[3,5],
[5,3],
[5,3]
]
)
def test_one_child_node_with_one_common_variable():
assert_sum_product(
[
0,
(
2,
[
1,
]
)
],
[0,2,1],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.ones((3,)),
],
[
[3,5],
[5,9],
[5]
]
)
def test_one_child_node_with_no_common_variable():
assert_sum_product(
[
0,
(
2,
[
1,
]
)
],
[0,2,1],
[
np.random.randn(2),
np.random.randn(3),
np.ones(()),
],
[
[3],
[9],
[]
]
)
def test_one_grand_child_node_with_no_variable_shared_with_grand_parent():
assert_sum_product(
[
0,
(
3,
[
1,
(
4,
[
2,
]
)
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.random.randn(4, 5),
np.ones((3,)),
np.ones((4,)),
],
[
[3, 5],
[5, 9],
[9, 1],
[5],
[9]
]
)
def test_one_grand_child_node_with_variable_shared_with_grand_parent():
assert_sum_product(
[
0,
(
3,
[
1,
(
4,
[
2,
]
)
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.random.randn(6, 3),
np.ones((3,)),
np.ones((3,)),
],
[
[3, 5],
[5, 9],
[1, 5],
[5],
[5]
]
)
def test_two_children_with_no_variable_shared():
assert_sum_product(
[
0,
(
3,
[
1,
]
),
(
4,
[
2,
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.random.randn(2, 5),
np.ones((3,)),
np.ones((2,)),
],
[
[3, 5],
[5, 9],
[3, 1],
[5],
[3]
]
)
def test_two_child_with_shared_variable():
assert_sum_product(
[
0,
(
3,
[
1,
]
),
(
4,
[
2,
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.random.randn(3),
np.ones((3,)),
np.ones((3,)),
],
[
[3, 5],
[5, 9],
[5],
[5],
[5]
]
)
def test_two_children_with_3D_tensors():
assert_sum_product(
[
0,
(
3,
[
1,
]
),
(
4,
[
2,
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3, 4),
np.random.randn(3, 4, 5),
np.random.randn(3, 6),
np.ones((3, 4)),
np.ones((3,)),
],
[
[3,5,7],
[5,7,9],
[5,1],
[5,7],
[5]
]
)
def test_divide_matrix_product():
# dividing messages from product when neighbor message is excluded
# this avoids re-doing einsum calculations to accomplish the same
# one full message product is calculated and messages are removed from the
# product by performing the division operation
potentials = [
np.random.randn(2, 3, 6),
np.random.randn(3, 4),
np.random.randn(2, 5),
np.ones((3,)),
np.ones((2,)),
np.ones((6,)),
np.random.randn(4, 6)
]
variables = [
[3, 5, 7],
[5, 9],
[3, 1],
[5],
[3],
[7],
[2, 7]
]
msg1 = np.einsum(potentials[1], variables[1], variables[3])
msg2 = np.einsum(potentials[2], variables[2], variables[4])
msg3 = np.einsum(potentials[6], variables[6], variables[5])
msg_prod = np.einsum(msg1, variables[3], msg2, variables[4], msg3, variables[5], variables[0])
    msg_prod_x6 = np.einsum(msg1, variables[3], msg2, variables[4], [3,5])
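    # Assumed check (the source is truncated here): dividing the full product
    # by msg3's factor over axis 7 recovers the product computed without msg3,
    # which is exactly the division trick described in the comments above.
    assert np.allclose(msg_prod / msg3, msg_prod_x6[..., None])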
import os
import pickle
from PIL import Image
import numpy as np
import json
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
class CUB(Dataset):
"""support CUB"""
def __init__(self, args, partition='base', transform=None):
super(Dataset, self).__init__()
self.data_root = args.data_root
self.partition = partition
self.data_aug = args.data_aug
self.mean = [0.485, 0.456, 0.406]
self.std = [0.229, 0.224, 0.225]
self.normalize = transforms.Normalize(mean=self.mean, std=self.std)
self.image_size = 84
if self.partition == 'base':
self.resize_transform = transforms.Compose([
lambda x: Image.fromarray(x),
transforms.Resize([int(self.image_size*1.15), int(self.image_size*1.15)]),
transforms.RandomCrop(size=84)
])
else:
self.resize_transform = transforms.Compose([
lambda x: Image.fromarray(x),
transforms.Resize([int(self.image_size*1.15), int(self.image_size*1.15)]),
transforms.CenterCrop(self.image_size)
])
if transform is None:
if self.partition == 'base' and self.data_aug:
self.transform = transforms.Compose([
lambda x: Image.fromarray(x),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
lambda x: np.asarray(x).copy(),
transforms.ToTensor(),
self.normalize
])
else:
self.transform = transforms.Compose([
lambda x: Image.fromarray(x),
transforms.ToTensor(),
self.normalize
])
else:
self.transform = transform
self.data = {}
self.file_pattern = '%s.json'
with open(os.path.join(self.data_root, self.file_pattern % partition), 'rb') as f:
meta = json.load(f)
self.imgs = []
labels = []
for i in range(len(meta['image_names'])):
image_path = os.path.join(meta['image_names'][i])
self.imgs.append(image_path)
label = meta['image_labels'][i]
labels.append(label)
# adjust sparse labels to labels from 0 to n.
cur_class = 0
label2label = {}
for idx, label in enumerate(labels):
if label not in label2label:
label2label[label] = cur_class
cur_class += 1
new_labels = []
for idx, label in enumerate(labels):
new_labels.append(label2label[label])
self.labels = new_labels
self.num_classes = np.unique(np.array(self.labels)).shape[0]
def __getitem__(self, item):
image_path = self.imgs[item]
img = Image.open(image_path).convert('RGB')
img = np.array(img).astype('uint8')
img = np.asarray(self.resize_transform(img)).astype('uint8')
img = self.transform(img)
target = self.labels[item]
return img, target, item
def __len__(self):
return len(self.labels)
class MetaCUB(CUB):
def __init__(self, args, partition='base', train_transform=None, test_transform=None, fix_seed=True):
super(MetaCUB, self).__init__(args, partition)
self.fix_seed = fix_seed
self.n_ways = args.n_ways
self.n_shots = args.n_shots
self.n_queries = args.n_queries
self.classes = list(self.data.keys())
self.n_test_runs = args.n_test_runs
self.n_aug_support_samples = args.n_aug_support_samples
self.resize_transform_train = transforms.Compose([
lambda x: Image.fromarray(x),
transforms.Resize([int(self.image_size*1.15), int(self.image_size*1.15)]),
transforms.RandomCrop(size=84)
])
self.resize_transform_test = transforms.Compose([
lambda x: Image.fromarray(x),
transforms.Resize([int(self.image_size*1.15), int(self.image_size*1.15)]),
transforms.CenterCrop(self.image_size)
])
if train_transform is None:
self.train_transform = transforms.Compose([
lambda x: Image.fromarray(x),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
lambda x: np.asarray(x).copy(),
transforms.ToTensor(),
self.normalize
])
else:
self.train_transform = train_transform
if test_transform is None:
self.test_transform = transforms.Compose([
lambda x: Image.fromarray(x),
transforms.ToTensor(),
self.normalize
])
else:
self.test_transform = test_transform
self.data = {}
for idx in range(len(self.imgs)):
if self.labels[idx] not in self.data:
self.data[self.labels[idx]] = []
self.data[self.labels[idx]].append(self.imgs[idx])
self.classes = list(self.data.keys())
def _load_imgs(self, img_paths, transform):
imgs = []
for image_path in img_paths:
img = Image.open(image_path).convert('RGB')
img = np.array(img).astype('uint8')
img = transform(img)
imgs.append(np.asarray(img).astype('uint8'))
return np.asarray(imgs).astype('uint8')
def __getitem__(self, item):
if self.fix_seed:
np.random.seed(item)
cls_sampled = np.random.choice(self.classes, self.n_ways, False)
support_xs = []
support_ys = []
query_xs = []
query_ys = []
for idx, cls in enumerate(cls_sampled):
imgs_paths = self.data[cls]
support_xs_ids_sampled = np.random.choice(range(len(imgs_paths)), self.n_shots, False)
support_paths = [imgs_paths[i] for i in support_xs_ids_sampled]
support_imgs = self._load_imgs(support_paths, transform=self.resize_transform_train)
support_xs.append(support_imgs)
support_ys.append([idx] * self.n_shots)
query_xs_ids = np.setxor1d(np.arange(len(imgs_paths)), support_xs_ids_sampled)
query_xs_ids = np.random.choice(query_xs_ids, self.n_queries, False)
query_paths = [imgs_paths[i] for i in query_xs_ids]
query_imgs = self._load_imgs(query_paths, transform=self.resize_transform_test)
query_xs.append(query_imgs)
query_ys.append([idx] * query_xs_ids.shape[0])
support_xs, support_ys, query_xs, query_ys = np.array(support_xs), np.array(support_ys), np.array(query_xs), np.array(query_ys)
num_ways, n_queries_per_way, height, width, channel = query_xs.shape
query_xs = query_xs.reshape((num_ways * n_queries_per_way, height, width, channel))
query_ys = query_ys.reshape((num_ways * n_queries_per_way,))
support_xs = support_xs.reshape((-1, height, width, channel))
if self.n_aug_support_samples > 1:
support_xs = np.tile(support_xs, (self.n_aug_support_samples, 1, 1, 1))
support_ys = np.tile(support_ys.reshape((-1,)), (self.n_aug_support_samples))
        support_xs = np.split(support_xs, support_xs.shape[0], axis=0)
import cv2
from .figure import FigureStatus
from collections import deque
import numpy as np
# Color space
# The values below were obtained using ImageJ (image-> adjust-> threshold)
MIN_H = 30
MAX_H = 100
MIN_S = 15
MAX_S = 176
MIN_V = 47
MAX_V = 175
lower = np.array((MIN_H, MIN_S, MIN_V))
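# Hypothetical usage sketch (added): `upper` and `frame` below are assumptions,
# not part of this file. Bounds like these typically feed cv2.inRange to build
# a binary mask over the HSV image:
#   upper = np.array((MAX_H, MAX_S, MAX_V))
#   hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
#   mask = cv2.inRange(hsv, lower, upper)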
from tqdm import tqdm
import numpy as np
def standardize(x, std_x = None, mean_x = None, ignore_first = True):
"""Standardize the original data set."""
x = np.copy(x)
if type(mean_x) == type(None):
mean_x = np.mean(x, axis=0)
x = x - mean_x
if ignore_first:
x[:,0] = 1
if type(std_x) == type(None):
std_x = np.std(x, axis=0)
for i in range(std_x.shape[0]):
if std_x[i] > 0: x[:, i] = x[:, i] / std_x[i]
return x, mean_x, std_x
def binarize_categorical_feature(f):
""" return binary columns for each feature value """
values = sorted(list(set(f[:,0])))
assert len(values) < 10, "too many categories"
x = np.zeros((f.shape[0], 1))
for v in values:
x = np.hstack((x, f == v))
return x[:,1:]
def binarize_categorical(x, ids):
""" replace categorical feature with multiple binary ones """
x_ = np.zeros((x.shape[0], 1))
for idx in ids:
x_ = np.hstack((x_, binarize_categorical_feature(x[:, idx:idx+1])))
x = np.delete(x, ids, axis=1)
x = np.hstack((x, x_[:, 1:]))
return x
def impute_with_mean(X_, ids, missing_val = -999):
""" replace missing_val with mean value on columns ids """
X_ = np.copy(X_)
X = X_[:, ids]
X[X == missing_val] = None
nan_mean = np.nanmean(X, axis = 0)
inds = np.where(np.isnan(X))
X[inds] = np.take(nan_mean, inds[1])
X_[:, ids] = X
return X_
def add_polynomial(X, ids, max_degrees = 2):
""" add constant feature and degrees of features ids up to selected degree """
if type(max_degrees) == int:
max_degrees = [max_degrees] * len(ids)
X_orig = X
X = np.copy(X)
X = np.hstack((np.ones((X.shape[0], 1)), X))
for i, idx in enumerate(ids):
for degree in range(2, max_degrees[i] + 1):
X = np.hstack((X, np.power(X_orig[:, idx:idx+1], degree)))
return X
def indicator_missing(X, ids, missing_val = -999.):
""" add binary feature indicating if original feature was missing """
X = np.copy(X)
for idx in ids:
f_miss = 1. * (X[:, idx:idx + 1] == missing_val)
X = np.hstack((X, f_miss))
return X
def add_mult(X):
X = np.array(X)
num_features = X.shape[1]
num_to_add = num_features * (num_features - 1) // 2
res = np.hstack((np.copy(X), np.zeros((X.shape[0], num_to_add))))
idx_add = num_features
for i in range(num_features):
for j in range(i + 1, num_features):
res[:, idx_add] = np.multiply(X[:, i], X[:, j])
idx_add += 1
return res
### DATASET-SPECIFIC FUNCTIONS
need_impute = [0, 5, 6, 12, 23, 24, 25, 26, 27, 28]
categorical = [23] #+1
need_poly = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,24,25,26,27,28,29]
def process_X(X, degree, tpl = (None, None)):
res = None
(x_mean, x_std) = tpl
with tqdm(total=6) as pbar:
X_1 = indicator_missing(X, need_impute)
pbar.update(1)
X_2 = impute_with_mean(X_1, need_impute)
pbar.update(1)
X_22 = add_mult(X_2)
pbar.update(1)
X_3 = add_polynomial(X_22, need_poly, max_degrees = degree)
pbar.update(1)
X_4 = binarize_categorical(X_3, categorical)
pbar.update(1)
X_5, x_mean, x_std = standardize(X_4, mean_x = x_mean, std_x = x_std)
pbar.update(1)
res = X_5
return res, (x_mean, x_std)
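# Hypothetical usage (added): fit normalization stats on the training split and
# reuse them on the test split to avoid leakage; `X_train`/`X_test` are assumed.
#   X_train_p, stats = process_X(X_train, degree=2)
#   X_test_p, _ = process_X(X_test, degree=2, tpl=stats)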
### TESTING SECTION
def test_binarize_1():
x = np.array([[1,2,2,4]]).T
x_copy = np.copy(x)
x_ = np.array([[1,0,0],[0,1,0],[0,1,0],[0,0,1]])
assert np.all(x_ == binarize_categorical_feature(x)), "binarize_categorical_feature"
assert np.all(x == x_copy), "copy"
def test_binarize_2():
x = np.array([[1,2,2,4],[0.1,0.2,0.3,0.4],[1,-1,1,1]]).T
x_copy = np.copy(x)
x_ = np.array([[0.1,1,0,0,0,1],[0.2,0,1,0,1,0],[0.3,0,1,0,0,1],[0.4,0,0,1,0,1]])
assert np.all(x_ == binarize_categorical(x, [0,2])), "binarize_categorical"
assert np.all(x == x_copy), "copy"
def test_impute_1():
X_ = np.array([[0,1,1],[3,4,0],[0,0,-999]], dtype=np.float64)
X_copy = np.copy(X_)
X__ = np.array([[ 0. , 1. , 1. ], [ 3. , 4. , 0. ], [ 0. , 0. , 0.5]])
assert np.all(X__ == impute_with_mean(X_, [2], missing_val=-999)), "impute_with_mean"
assert np.all(X_ == X_copy), "copy"
def test_poly_1():
X = np.array([[0,1,2],[3,4,5],[0.5,0.6,0.7]])
X_copy = np.copy(X)
X_ans = np.array([[ 1. , 0. , 1. , 2. , 0. , 0. , 4. ],
[ 1. , 3. , 4. , 5. , 9. , 27. , 25. ],
[ 1. , 0.5 , 0.6 , 0.7 , 0.25 , 0.125, 0.49 ]])
assert np.allclose(X_ans, add_polynomial(X, [0,2], max_degrees = [3,2])), "add_polynomial"
    assert np.all(X_copy == X), "copy"
import numpy as np
from numpy.fft import fft, rfft, ifft, irfft
import gmpy
def is_prime(n):
i = 2
while i * i <= n:
if n % i == 0:
return False
i += 1
return True
def find_next_prime(n):
while True:
n += 1
if is_prime(n):
return n
primes_list = []
def get_primes(bound):
global primes_list
pos = 0
cur_mult = 1
last_num = 1000000000
while cur_mult <= bound or pos == 0:
if pos >= len(primes_list):
primes_list.append(find_next_prime(last_num))
last_num = primes_list[pos]
cur_mult *= last_num
pos += 1
return primes_list[:pos]
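# Note (added, an inference): the primes are ~1e9-sized and collected until their
# product exceeds `bound`, presumably so residues modulo them determine integers
# below the bound exactly (Chinese-remainder reconstruction; cf. the gmpy import).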
def multiply_fft(v1, v2, real=False):
N = 1
while N < len(v1):
N *= 2
N *= 2
if real:
        f1 = rfft(v1, n=N)
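        # Sketch of the truncated remainder (added; an assumption -- the original
        # body is cut off here): pointwise-multiply in the frequency domain, then
        # invert, e.g.
        #   f2 = rfft(v2, n=N)
        #   return irfft(f1 * f2, n=N)[:len(v1) + len(v2) - 1]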
import os.path
import PIL.Image as pimg
import nibabel as nib
import numpy as np
import torch
from torch.autograd import Variable
from in_out.image_functions import rescale_image_intensities, points_to_voxels_transform
from support.utilities.general_settings import Settings
class Image:
"""
    Image object.
    Represents a 2/3D image embedded in space: voxel intensities together with an
    affine matrix mapping voxel coordinates to spatial positions.
"""
####################################################################################################################
### Constructor:
####################################################################################################################
# Constructor.
def __init__(self):
self.type = 'Image'
self.is_modified = True
self.affine = None
self.corner_points = None
self.bounding_box = None
self.downsampling_factor = 1
self.intensities = None # Numpy array.
self.intensities_torch = None
self.intensities_dtype = None
# Clone.
def clone(self):
clone = Image()
clone.is_modified = True
clone.affine = np.copy(self.affine)
clone.corner_points = np.copy(self.corner_points)
clone.bounding_box = np.copy(self.bounding_box)
clone.downsampling_factor = self.downsampling_factor
clone.intensities = np.copy(self.intensities)
clone.intensities_torch = self.intensities_torch.clone()
clone.intensities_dtype = self.intensities_dtype
return clone
####################################################################################################################
### Encapsulation methods:
####################################################################################################################
def get_number_of_points(self):
raise RuntimeError("Not implemented for Image yet.")
def set_affine(self, affine_matrix):
"""
The affine matrix A is a 4x4 matrix that gives the correspondence between the voxel coordinates and their
spatial positions in the 3D space: (x, y, z, 1) = A (u, v, w, 1).
See the nibabel documentation for further details (the same attribute name is used here).
"""
self.affine = affine_matrix
def set_intensities(self, intensities):
self.is_modified = True
self.intensities = intensities
def get_intensities(self):
return self.intensities
def get_intensities_torch(self):
return self.intensities_torch
def get_points(self):
image_shape = self.intensities.shape
dimension = Settings().dimension
axes = []
for d in range(dimension):
axe = np.linspace(self.corner_points[0, d], self.corner_points[2 ** d, d],
image_shape[d] // self.downsampling_factor)
axes.append(axe)
points = np.array(np.meshgrid(*axes, indexing='ij')[:])
for d in range(dimension):
points = np.swapaxes(points, d, d + 1)
return points
# @jit(parallel=True)
def get_deformed_intensities(self, deformed_points, intensities):
"""
Torch input / output.
Interpolation function with zero-padding.
"""
dimension = Settings().dimension
image_shape = self.intensities.shape
deformed_voxels = points_to_voxels_transform(deformed_points, self.affine)
if dimension == 2:
if not self.downsampling_factor == 1:
shape = deformed_points.shape
deformed_voxels = torch.nn.Upsample(size=self.intensities.shape, mode='bilinear', align_corners=True)(
deformed_voxels.permute(2, 0, 1).contiguous().view(
1, shape[2], shape[0], shape[1]))[0].permute(1, 2, 0).contiguous()
u, v = deformed_voxels.view(-1, 2)[:, 0], deformed_voxels.view(-1, 2)[:, 1]
u1 = np.floor(u.data.cpu().numpy()).astype(int)
v1 = np.floor(v.data.cpu().numpy()).astype(int)
u1 = np.clip(u1, 0, image_shape[0] - 1)
v1 = np.clip(v1, 0, image_shape[1] - 1)
u2 = np.clip(u1 + 1, 0, image_shape[0] - 1)
v2 = np.clip(v1 + 1, 0, image_shape[1] - 1)
fu = u - Variable(torch.from_numpy(u1).type(Settings().tensor_scalar_type))
fv = v - Variable(torch.from_numpy(v1).type(Settings().tensor_scalar_type))
gu = Variable(torch.from_numpy(u1 + 1).type(Settings().tensor_scalar_type)) - u
gv = Variable(torch.from_numpy(v1 + 1).type(Settings().tensor_scalar_type)) - v
deformed_intensities = (intensities[u1, v1] * gu * gv +
intensities[u1, v2] * gu * fv +
intensities[u2, v1] * fu * gv +
intensities[u2, v2] * fu * fv).view(image_shape)
elif dimension == 3:
if not self.downsampling_factor == 1:
shape = deformed_points.shape
deformed_voxels = torch.nn.Upsample(size=self.intensities.shape, mode='trilinear', align_corners=True)(
deformed_voxels.permute(3, 0, 1, 2).contiguous().view(
1, shape[3], shape[0], shape[1], shape[2]))[0].permute(1, 2, 3, 0).contiguous()
u, v, w = deformed_voxels.view(-1, 3)[:, 0], \
deformed_voxels.view(-1, 3)[:, 1], \
deformed_voxels.view(-1, 3)[:, 2]
u1_numpy = np.floor(u.data.cpu().numpy()).astype(int)
v1_numpy = np.floor(v.data.cpu().numpy()).astype(int)
w1_numpy = np.floor(w.data.cpu().numpy()).astype(int)
u1 = torch.from_numpy(np.clip(u1_numpy, 0, image_shape[0] - 1)).type(Settings().tensor_integer_type)
v1 = torch.from_numpy(np.clip(v1_numpy, 0, image_shape[1] - 1)).type(Settings().tensor_integer_type)
w1 = torch.from_numpy(np.clip(w1_numpy, 0, image_shape[2] - 1)).type(Settings().tensor_integer_type)
u2 = torch.from_numpy(np.clip(u1_numpy + 1, 0, image_shape[0] - 1)).type(Settings().tensor_integer_type)
v2 = torch.from_numpy(np.clip(v1_numpy + 1, 0, image_shape[1] - 1)).type(Settings().tensor_integer_type)
w2 = torch.from_numpy(np.clip(w1_numpy + 1, 0, image_shape[2] - 1)).type(Settings().tensor_integer_type)
fu = u - Variable(torch.from_numpy(u1_numpy).type(Settings().tensor_scalar_type))
fv = v - Variable(torch.from_numpy(v1_numpy).type(Settings().tensor_scalar_type))
fw = w - Variable(torch.from_numpy(w1_numpy).type(Settings().tensor_scalar_type))
gu = Variable(torch.from_numpy(u1_numpy + 1).type(Settings().tensor_scalar_type)) - u
gv = Variable(torch.from_numpy(v1_numpy + 1).type(Settings().tensor_scalar_type)) - v
gw = Variable(torch.from_numpy(w1_numpy + 1).type(Settings().tensor_scalar_type)) - w
deformed_intensities = (intensities[u1, v1, w1] * gu * gv * gw +
intensities[u1, v1, w2] * gu * gv * fw +
intensities[u1, v2, w1] * gu * fv * gw +
intensities[u1, v2, w2] * gu * fv * fw +
intensities[u2, v1, w1] * fu * gv * gw +
intensities[u2, v1, w2] * fu * gv * fw +
intensities[u2, v2, w1] * fu * fv * gw +
intensities[u2, v2, w2] * fu * fv * fw).view(image_shape)
else:
raise RuntimeError('Incorrect dimension of the ambient space: %d' % dimension)
return deformed_intensities
####################################################################################################################
### Public methods:
####################################################################################################################
# Update the relevant information.
def update(self):
if self.is_modified:
self._update_corner_point_positions()
self.update_bounding_box()
self.intensities_torch = Variable(torch.from_numpy(
self.intensities).type(Settings().tensor_scalar_type)).contiguous()
self.is_modified = False
def update_bounding_box(self):
"""
Compute a tight bounding box that contains all the 2/3D-embedded image data.
"""
dimension = Settings().dimension
self.bounding_box = np.zeros((dimension, 2))
for d in range(dimension):
self.bounding_box[d, 0] = np.min(self.corner_points[:, d])
            self.bounding_box[d, 1] = np.max(self.corner_points[:, d])
import os
import csv
import glob
import h5py
import shutil
import random
import numpy as np
import nibabel as nib
import multiprocessing
from multiprocessing import Pool
from joblib import Parallel, delayed
from scipy.io import loadmat
from scipy.ndimage import label as ndlabel
from collections import Counter
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def label_filtering(lab, ignored_labels, true_labels):
"""Convert the labels and replace nan and inf with zeros.
The filtered label starts from 1.
"""
lab[np.isnan(lab)] = 0
lab[np.isinf(lab)] = 0
# skip if the labels are already correct
# (a strong assumption that we always have the largest label)
if np.max(lab.ravel()) <= len(true_labels):
return lab
for ignored_label in ignored_labels:
lab[lab == ignored_label] = 0
for idx, label in enumerate(true_labels):
lab[lab == label] = idx + 1
return lab
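# Example (added): map raw labels {10, 20, 30} to {1, 2, 3} and zero out label 99:
#   lab = label_filtering(lab, ignored_labels=[99], true_labels=[10, 20, 30])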
def get_nonzero_limit(img, axis, idx_range):
"""Get the index first hyperplane containing nonzeros.
Input:
img (np.ndarray): tensor, could be 2d or 3d
axis (int): the axis to scan
idx_range (list-like): ordered indice to search
Output:
the first index at which contains a nonzero hyperplane.
"""
dim = len(img.shape)
s = [slice(None)] * dim
# scan every plane until a nonzero item is found
for idx in idx_range:
# the plane cuts through this point
s[axis] = idx
if img[s].any():
return idx
# otherwise, return the last index
return idx_range[-1]
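# Example usage (added): tight nonzero bounds along axis 0 of a volume `img`:
#   lo = get_nonzero_limit(img, 0, range(img.shape[0]))              # first nonzero slice
#   hi = get_nonzero_limit(img, 0, range(img.shape[0] - 1, -1, -1))  # last nonzero slice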
def get_largest_component(lab):
"""Get largest connected component.
Given a multi-class labeling,
leave the largest connected component
for each class.
"""
classes = np.unique(lab)
classes = np.delete(classes, np.argwhere(classes == 0))
pruned_lab = np.zeros(lab.shape, dtype=lab.dtype)
for c in classes:
print("Finding largest connected component in class {}".format(c))
# make it black and white
bw = np.zeros(lab.shape)
bw[lab == c] = 1
# 26 connectivity for 3D images
conn = np.ones((3,3,3))
# clustered_lab.shape = bw.shape
clustered_lab, n_comps = ndlabel(bw, conn)
# sort components by volume from smallest to largest (skip zero)
        comp_volumes = [np.sum(clustered_lab == i) for i in range(1, n_comps + 1)]  # ndlabel labels run 1..n_comps
        comp_labels = np.argsort(comp_volumes)
import builtins
import warnings
import numpy as np
from aesara import config, printing
from aesara import scalar as aes
from aesara.gradient import DisconnectedType
from aesara.graph.basic import Apply, Variable
from aesara.graph.op import COp, Op
from aesara.graph.params_type import ParamsType
from aesara.graph.type import Generic
from aesara.misc.safe_asarray import _asarray
from aesara.printing import pprint
from aesara.scalar.basic import BinaryScalarOp
from aesara.tensor.basic import (
alloc,
arange,
as_tensor_variable,
cast,
concatenate,
constant,
patternbroadcast,
stack,
switch,
)
from aesara.tensor.elemwise import (
CAReduce,
CAReduceDtype,
DimShuffle,
Elemwise,
scalar_elemwise,
)
from aesara.tensor.shape import shape
from aesara.tensor.type import (
complex_dtypes,
continuous_dtypes,
discrete_dtypes,
int_dtypes,
integer_dtypes,
tensor,
uint_dtypes,
)
from aesara.tensor.type_other import NoneConst
from aesara.tensor.utils import as_list
from aesara.tensor.var import TensorConstant, _tensor_py_operators
# We capture the builtins that we are going to replace to follow the numpy API
_abs = builtins.abs
if int(config.tensor__cmp_sloppy) > 1:
# This config variable is a quick-and-dirty way to get low-precision
# comparisons. For a more precise setting of these tolerances set
# them explicitly in your user code by assigning, for example,
# "aesara.tensor.math.float32_atol = ..."
# When config.tensor__cmp_sloppy>1 we are even more sloppy. This is
# useful to test the GPU as they don't use extended precision and
# this cause some difference bigger then the normal sloppy.
float16_atol = 1e-2
float16_rtol = 5e-2
float32_atol = 5e-4
float32_rtol = 1e-3
float64_rtol = 1e-4
float64_atol = 1e-3
elif int(config.tensor__cmp_sloppy):
float16_atol = 5e-3
float16_rtol = 1e-2
float32_atol = 1e-4
float32_rtol = 1e-3
float64_rtol = 1e-4
float64_atol = 1e-3
else:
# If you change those value in test don't forget to put them back
# when the test end. Don't forget the case when the test fail.
float16_atol = 1e-3
float16_rtol = 1e-3
float32_atol = 1e-5
float32_rtol = 1e-5
# defaults in numpy.allclose
# Don't be more strict then numpy rtol
# It cause useless error.
float64_rtol = 1.0000000000000001e-05
float64_atol = 1e-8
def _get_atol_rtol(a, b):
tiny = ("float16",)
narrow = ("float32", "complex64")
if (str(a.dtype) in tiny) or (str(b.dtype) in tiny):
atol = float16_atol
rtol = float16_rtol
elif (str(a.dtype) in narrow) or (str(b.dtype) in narrow):
atol = float32_atol
rtol = float32_rtol
else:
atol = float64_atol
rtol = float64_rtol
return atol, rtol
def _allclose(a, b, rtol=None, atol=None):
a = np.asarray(a)
b = np.asarray(b)
atol_, rtol_ = _get_atol_rtol(a, b)
if rtol is not None:
rtol_ = rtol
if atol is not None:
atol_ = atol
return np.allclose(a, b, atol=atol_, rtol=rtol_)
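# Example (added): tolerances widen automatically for narrow dtypes, e.g. a ~1e-3
# gap passes for float16 inputs (atol = rtol = 1e-3) but fails for float64 ones:
#   _allclose(np.float16(1.0), np.float16(1.001))   # True
#   _allclose(np.float64(1.0), np.float64(1.001))   # False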
class MaxAndArgmax(COp):
"""
Calculate the max and argmax over a given axis or over all axes.
"""
nin = 2 # tensor, axis
nout = 2 # max val, max idx
E_axis = "invalid axis"
params_type = Generic()
__props__ = ("axis",)
_f16_ok = True
def __init__(self, axis):
assert isinstance(axis, list)
self.axis = tuple(axis)
def get_params(self, node):
return self.axis
def make_node(self, x):
x = as_tensor_variable(x)
# We keep the original broadcastable flags for dimensions on which
# we do not perform the max / argmax.
all_axes = set(self.axis)
broadcastable = [
b for i, b in enumerate(x.type.broadcastable) if i not in all_axes
]
inputs = [x]
outputs = [
tensor(x.type.dtype, broadcastable, name="max"),
tensor("int64", broadcastable, name="argmax"),
]
return Apply(self, inputs, outputs)
def perform(self, node, inp, outs, params):
x = inp[0]
axes = params
max, max_idx = outs
if axes is None:
axes = tuple(range(x.ndim))
else:
axes = tuple(int(ax) for ax in axes)
max[0] = _asarray(np.max(x, axes), dtype=node.outputs[0].dtype)
# Numpy does not support multiple axes for argmax
# Work around
keep_axes = np.array([i for i in range(x.ndim) if i not in axes], dtype="int64")
# Not-reduced axes in front
transposed_x = np.transpose(x, np.concatenate((keep_axes, axes)))
kept_shape = transposed_x.shape[: len(keep_axes)]
reduced_shape = transposed_x.shape[len(keep_axes) :]
# Numpy.prod returns 1.0 when arg is empty, so we cast it to int64
# Otherwise reshape would complain citing float arg
new_shape = kept_shape + (np.prod(reduced_shape, dtype="int64"),)
reshaped_x = transposed_x.reshape(new_shape)
        max_idx[0] = _asarray(np.argmax(reshaped_x, axis=-1), dtype="int64")
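        # Standalone sketch of the multi-axis argmax workaround above (added), for
        # a plain ndarray `x` and a tuple `axes`:
        #   keep = [i for i in range(x.ndim) if i not in axes]
        #   xt = np.transpose(x, keep + list(axes))
        #   flat = xt.reshape(xt.shape[:len(keep)] + (-1,))
        #   idx = np.argmax(flat, axis=-1)  # index into the flattened reduced axes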
#!/usr/bin/env python3
import click
#from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as clrs
# math
def flat(x1, x2, y1, y2, steps_per_unit):
a = np.linspace(x1, x2, int((x2-x1)*steps_per_unit + 1))[np.newaxis, ...]
b = np.linspace(y2, y1, int((y2-y1)*steps_per_unit + 1))[..., np.newaxis] * 1j
return a + b
def M(x1, x2, y1, y2, dpu, r, iters):
c = flat(x1, x2, y1, y2, dpu)
sp = c.shape
    z = np.zeros(sp).astype(complex)  # np.complex is a removed NumPy alias; use the builtin
    t = np.ones(sp)
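    # Sketch of the escape-time loop that presumably follows (added; the original
    # body is truncated here): iterate z <- z^2 + c, recording when |z| exceeds r:
    #   for i in range(iters):
    #       alive = np.abs(z) <= r
    #       z[alive] = z[alive] ** 2 + c[alive]
    #       t[alive] = i
    #   return t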
from enum import Enum
from functools import partial
from z3 import *
from anyHR.constraint.Constraint import Constraints
from anyHR.pso.pso import *
import numpy as np
def _obj_wrapper(func, args, kwargs, x):
return func(x, *args, **kwargs)
# class HRVariant(Enum):
# VANILLA = 0,
# SHRINKING = 1,
# SMT = 2,
# VANILLA_SMT = 3,
# SHRINKING_SMT = 4
# CDHR = 5
class DirectionSampling(Enum):
RDHR = 0
CDHR = 1
class Shrinking(Enum):
NO_SHRINKING = 0
SHRINKING = 1
class InitPoint(Enum):
PSO = 0
SMT = 1
class HitAndRun:
def __init__(self, constraint, bounding_box, direction_sampling=DirectionSampling.RDHR,
shrinking=Shrinking.NO_SHRINKING, init_point=InitPoint.PSO):
self.constraint = constraint
self.bounding_box = bounding_box
self.shrinking = shrinking
self.direction_sampling = direction_sampling
self.init_point = init_point
assert(self.init_point == InitPoint.PSO or self.constraint.is_polynomial), \
"You cannot use non-polynomial constraints with SMT"
# set the starting point- either with optimizer or with smt solver
if init_point == InitPoint.PSO:
self.starting_point = self._starting_point_pso()
else: # using z3/SMT
self.starting_point = self._starting_point_smt()
self.current_point = self.starting_point
# set the function handles according to the options (direction set, shrinking)
# Beware: the next_sample() function is being set here. This means
if direction_sampling == DirectionSampling.RDHR and shrinking == Shrinking.NO_SHRINKING: # simple RDHR
self.next_sample = self._next_sample_vanilla
elif direction_sampling == DirectionSampling.RDHR and shrinking == Shrinking.SHRINKING: # RDHR + Shrinking
self.next_sample = self._next_sample_rdhr_shrinking
elif direction_sampling == DirectionSampling.CDHR and shrinking == Shrinking.NO_SHRINKING: # simple CDHR
self.next_sample = self._next_sample_cdhr
elif direction_sampling == DirectionSampling.CDHR and shrinking == Shrinking.SHRINKING:
self.next_sample = self.next_sample_cdhr_shrinking
def sampler(self, n, burn_in_period=100):
        # convenience wrapper: draws n samples, running next_sample() for a burn-in period before each draw
dim = len(self.constraint.var_name_list)
# start = time.perf_counter()
samples = np.ndarray((dim, n))
rejections = 0
for i in range(n):
for j in range(burn_in_period):
sample, rejections_this_sample = self.next_sample()
rejections += rejections_this_sample
# TODO does this number make sense to compare? Maybe average rej/sample?
sample, rejections_this_sample = self.next_sample()
rejections += rejections_this_sample
samples[:, i] = sample
# end = time.perf_counter()
# elapsed = end - start
return samples
# def next_sample(self):
# if self.variant == HRVariant.VANILLA or self.variant == HRVariant.VANILLA_SMT:
# return self.next_sample_vanilla()
# elif self.variant == HRVariant.SHRINKING or self.variant == HRVariant.SHRINKING_SMT:
# return self.next_sample_with_shrinking()
# elif self.variant == HRVariant.CDHR:
# return self._next_sample_cdhr()
# else:
# return self.next_sample_z3()
def _next_sample_vanilla(self):
# normal random directions hit-and-run (rdhr)
b = self.current_point
success = False
while not success:
a = self._random_direction()
inter, success = self._line_box_intersection(a, b, self.bounding_box)
inter_1 = inter[0]
inter_2 = inter[1]
success = False
rejections = -1
while not success:
rnd = np.random.uniform()
sample = []
for i in range(len(inter_1)):
x_0 = inter_1[i]
x_1 = inter_2[i]
s_i = (x_1 - x_0) * rnd + x_0
sample.append(s_i)
success = self.constraint.evaluate(sample)
rejections += 1
self.current_point = sample
return sample, rejections
def _next_sample_rdhr_shrinking(self):
b = self.current_point
success = False
while not success:
a = self._random_direction()
inter, success = self._line_box_intersection(a, b, self.bounding_box)
success = False
rejections = -1
inter_1 = inter[0]
inter_2 = inter[1]
r_1 = (inter_1[0] - b[0]) / a[0]
r_2 = (inter_2[0] - b[0]) / a[0]
r_min = min(r_1, r_2)
r_max = max(r_1, r_2)
while not success:
rnd = np.random.uniform(r_min, r_max)
sample = []
for i in range(len(inter_1)):
s_i = b[i] + rnd * a[i]
sample.append(s_i)
success = self.constraint.evaluate(sample)
if not success:
if rnd > 0:
r_max = rnd
else:
r_min = rnd
rejections += 1
# FELIX: fixed bug in next line, 30.03.
self.current_point = sample
return sample, rejections
def _next_sample_cdhr(self):
b = self.current_point
success = False
while not success:
a, direction_int = self._random_direction_cdhr()
inter, success = self._line_box_intersection_cdhr(a, direction_int, b, self.bounding_box)
inter_1 = inter[0]
inter_2 = inter[1]
success = False
rejections = -1
while not success:
rnd = np.random.uniform()
sample = []
for i in range(len(inter_1)):
x_0 = inter_1[i]
x_1 = inter_2[i]
s_i = (x_1 - x_0) * rnd + x_0
sample.append(s_i)
success = self.constraint.evaluate(sample)
rejections += 1
self.current_point = sample
return sample, rejections
def next_sample_cdhr_shrinking(self):
# assert (False), 'Not yet implemented.' # TODO check whether this is even theoretically sound
b = self.current_point
success = False
while not success: # This should only do one iteration?
a, direction_int = self._random_direction_cdhr()
inter, success = self._line_box_intersection_cdhr(a, direction_int, b, self.bounding_box)
inter_1 = inter[0]
inter_2 = inter[1]
        # on the number line, subtracting b[direction_int] translates so that the
        # current point sits at zero on the axis parallel to the chosen line
r_min = inter_1[direction_int] - b[direction_int]
r_max = inter_2[direction_int] - b[direction_int]
success = False
rejections = -1
while not success:
rnd = np.random.uniform(r_min, r_max)
# sample = []
# for i in range(len(inter_1)):
# s_i = b[i] + rnd * a[i]
# sample.append(s_i)
sample = b.copy()
sample[direction_int] += rnd # only one component changes in cdhr and vector is unit vector. simply add
success = self.constraint.evaluate(sample)
if not success:
if rnd > 0:
r_max = rnd
else:
r_min = rnd
rejections += 1
# FELIX: fixed bug in next line, 30.03.
self.current_point = sample
return sample, rejections
def _line_box_intersection(self, a, b, box):
potential_solutions = []
for i in range(len(a)):
box_i = box[i]
box_i_min = box_i[0]
box_i_max = box_i[1]
t_min = (box_i_min - b[i]) / a[i]
t_max = (box_i_max - b[i]) / a[i]
potential_solutions.append(t_min)
potential_solutions.append(t_max)
solutions = []
for ps in potential_solutions:
is_solution = True
solution = []
for i in range(len(a)):
box_i = box[i]
result = a[i] * ps + b[i]
solution.append(result)
if result < box_i[0] or result > box_i[1]:
is_solution = False
break
if is_solution:
solutions.append(solution)
if len(solutions) >= 2:
flag = True
else:
flag = False
return solutions, flag
def _line_box_intersection_cdhr(self, a, direction_int, b, box):
# this is much easier since we know in which dimensions the hyperplanes are hit
potential_solutions = []
# i = np.argmax(np.abs(a)) # where the 1 is (cdhr has only vectors (0,...,1,0,...,0))
i = direction_int
box_i = box[i]
box_i_min = box_i[0]
box_i_max = box_i[1]
t_min = (box_i_min - b[i]) / a[i]
t_max = (box_i_max - b[i]) / a[i]
potential_solutions.append(t_min)
potential_solutions.append(t_max)
solutions = []
for ps in potential_solutions:
is_solution = True
solution = a * ps + b
solutions.append(solution)
if len(solutions) >= 2:
flag = True
else:
flag = False
return solutions, flag
def _sort_first(self, l):
return l[0]
# def _starting_point(self):
# if self.variant == HRVariant.SMT or self.variant == HRVariant.SHRINKING_SMT or self.variant == HRVariant.VANILLA_SMT or self.variant == HRVariant.CDHR:
# return self._starting_point_smt()
# else:
# return self._starting_point_pso()
def _starting_point_pso(self):
lb = []
ub = []
for b in self.bounding_box:
lb.append(b[0])
ub.append(b[1])
# out, rob_opt = pso(self._evaluate, lb, ub, f_ieqcons=None, swarmsize=10, omega=0.5, phip=0.5,
# phig=0.5, maxiter=10, minstep=1e-2, minfunc=0.1, debug=False)
out, rob_opt, budget = pso(self._evaluate, lb, ub, f_ieqcons=None, swarmsize=10000, omega=0.5, phip=0.5,
phig=0.5, maxiter=10000, minstep=1e-2, minfunc=0.1, debug=False)
if rob_opt > 0:
raise Exception('Could not find a starting point in the set.')
return out
def _evaluate(self, sample):
return -self.constraint.q_evaluate(sample)
def _starting_point_smt(self):
solver = self.constraint.solver
status = solver.check()
if status == unsat:
raise Exception('The set of constraints is unsatisfiable.')
model = solver.model()
out_d = dict()
for var in model:
value = model[var]
num = float(value.numerator_as_long())
den = float(value.denominator_as_long())
out_d[str(var)] = num / den
out = []
for var in self.constraint.var_name_list:
out.append(out_d[var])
return out
def _random_direction(self):
direction = []
for i in range(self.constraint.dimensions):
dir = np.random.uniform(-1, 1)
direction.append(dir)
        direction = np.array(direction)
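        # (Truncated here in the source. A hit-and-run direction is typically
        # normalized next, e.g. direction /= np.linalg.norm(direction). Note that
        # uniform-in-cube directions are not uniform on the sphere; drawing each
        # coordinate from np.random.normal and then normalizing would be.)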
#!/usr/bin/env python3
##############
# ver 2.1 - written in Python by <NAME> on 2/3/2019;
#           instead of a single ml_wr.py, the code is split into several files.
# ver 2.2 - added n_ensemble option on 2/21/2019
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='generate data block for machine learning input')
## args
parser.add_argument('-i', '--input', default='target.list', nargs='?',
help='input list file (format $file_index $temperature/density)')
parser.add_argument('-ipf', '--input_prefix', default='grid', nargs='?',
help='prefix of input grid .npy file')
parser.add_argument('-s1', '--select1', default=0.5, nargs='?', type=float,
help='select temperature/density1 (< args.select2) for training set')
parser.add_argument('-s2', '--select2', default=1.0, nargs='?', type=float,
help='select temperature/density2 (> args.select1) for training set')
parser.add_argument('-prop', '--prop', default=-1.0, nargs='?', type=float,
help='the proportion [0:1] of training set for getting accuracy of modeling (< 0. means nothing test set)')
parser.add_argument('-nb', '--n_blocks', default=0, nargs='?', type=int,
help='# of blocks for training set (zero means no block average sets)')
parser.add_argument('-nbe', '--n_blocks_eval', default=0, nargs='?', type=int,
help='# of blocks for eval set (zero means no block average sets)')
parser.add_argument('-net', '--ne_train', default=-1, nargs='?', type=int,
help='# of ensembles for train set per grid.npy (-1 to use all)')
parser.add_argument('-nee', '--ne_eval', default=-1, nargs='?', type=int,
help='# of ensembles for eval set per grid.npy (-1 to use all)')
parser.add_argument('-ng', '--n_grids', default=15, nargs='?', type=int,
help='# of grids for data sets')
parser.add_argument('-seed', '--seed', default=1985, nargs='?', type=int,
help='random seed to shuffle for test sets and block sets')
parser.add_argument('-o1', '--out_train', default='train', nargs='?',
help='prefix of output training set .npy file like train.(coord/temp/cat).$i.npy')
parser.add_argument('-o2', '--out_test', default='test', nargs='?',
help='prefix of output test set .npy file for accuracy like test.(coord/temp/cat).npy')
parser.add_argument('-o3', '--out_eval', default='eval', nargs='?',
help='prefix of output Tc evaluation set .npy file like eval.(coord/temp).npy')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 2.2')
# read args
args = parser.parse_args()
# check args
print(" input arguments: {0}".format(args))
# import modules
import numpy as np
import scipy as sc
import math
import copy
np.random.seed(args.seed)
# step1: read list file and split to train, test, and eval sets.
list_file = np.loadtxt(args.input)
list_temp = np.array(list_file[:,0],dtype=float)
list_file_idx = np.array(list_file[:,1],dtype=int)
train_set1 = np.where(list_temp <= args.select1)[0] # indices for temp1 of training
train_set2 = np.where(list_temp >= args.select2)[0] # indices for temp2 of training
eval_set = np.delete(np.arange(len(list_file_idx)), np.append(train_set1,train_set2)) # indices for eval
# make train_set and test_set with proportion and shuffle
if args.prop > 0.0:
if args.prop >= 0.5:
raise ValueError("args.prop {} is too high unlike purpose".format(args.prop))
n_test1 = int(len(train_set1)*args.prop)
n_test2 = int(len(train_set2)*args.prop)
np.random.shuffle(train_set1)
np.random.shuffle(train_set2)
test_set = np.append(train_set1[0:n_test1],train_set2[0:n_test2])
train_set1 = train_set1[n_test1:]
train_set2 = train_set2[n_test2:]
else:
print(" Not make test set")
np.random.shuffle(train_set1)
np.random.shuffle(train_set2)
test_set = np.array([],dtype=int)
print("Based on {} list file: ".format(args.input))
print(" total #train data: {} for temp <= {}, {} for temp >= {}".format(len(train_set1),args.select1,len(train_set2),args.select2))
print(" #test data: {}".format(len(test_set)))
print(" #eval data: {}".format(len(eval_set)))
# step2: make blocks for training sets.
if args.n_blocks > 0:
remain_1 = len(train_set1)%args.n_blocks
remain_2 = len(train_set2)%args.n_blocks
print(" trim ({},{}) elements from two training sets for equal size of block sets".format(remain_1,remain_2))
if remain_1 > 0:
train_set1 = train_set1[remain_1:]
if remain_2 > 0:
train_set2 = train_set2[remain_2:]
block_sets1 = np.split(train_set1,args.n_blocks)
block_sets2 = np.split(train_set2,args.n_blocks)
print(" #blocks for training set = {}".format(args.n_blocks))
else:
print(" no blocks for training sets")
block_sets1 = train_set1
block_sets2 = train_set2
# step3: make blocks for evaluation sets:
if args.n_blocks_eval > 0:
if len(eval_set)%args.n_blocks_eval != 0 :
raise ValueError("n_blocks_eval value is not good to splite eval_set ({} % {} != 0)".format(len(eval_set),args.n_blocks_eval))
block_sets_eval = np.split(eval_set,args.n_blocks_eval)
print(" #blocks for eval set = {}".format(args.n_blocks_eval))
else:
print(" no blocks for eval sets")
block_sets_eval = eval_set
# without padding
def make_npy_files_mode_ver0(mode, i_block, idx_array, input_prefix, output_prefix):
# mode = test/eval/train
if ("test" in mode) or ("train" in mode):
gen_cat = True
else:
gen_cat = False # eval case
# initialzie arrays
# As for eval set, we only use original grid info excluding ensembles or copies by trans, rot, and flip.
n_data = len(idx_array)
if gen_cat:
set_coord=np.empty((n_data,n_ensembles*pow(args.n_grids,3)))
set_temp=np.empty((n_data,n_ensembles))
set_cat=np.empty((n_data,n_ensembles))
esti_n_sets = n_ensembles
else: # eval case
set_coord=np.empty((n_data,n_eval_ensembles*pow(args.n_grids,3)))
        set_temp=np.empty((n_data,n_eval_ensembles))
#%%
import sys
import os
os.chdir(os.path.dirname(os.getcwd())) # make directory one step up the current directory
sys.path.append('/Users/mwinding/repos/maggot_models')
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
from src.data import load_metagraph
from src.visualization import CLASS_COLOR_DICT, adjplot
from src.traverse import Cascade, to_transmission_matrix
from src.traverse import TraverseDispatcher
from src.visualization import matrixplot
import connectome_tools.cascade_analysis as casc
import connectome_tools.celltype as ct
import connectome_tools.process_matrix as pm
adj_ad = pm.Promat.pull_adj(type_adj='ad', subgraph='brain')
#%%
# pull sensory annotations and then pull associated skids
order = ['olfactory', 'gustatory-external', 'gustatory-pharyngeal', 'enteric', 'thermo-warm', 'thermo-cold', 'visual', 'noci', 'mechano-Ch', 'mechano-II/III', 'proprio', 'respiratory']
sens = [ct.Celltype(name, ct.Celltype_Analyzer.get_skids_from_meta_annotation(f'mw {name}')) for name in order]
input_skids_list = [x.get_skids() for x in sens]
input_skids = ct.Celltype_Analyzer.get_skids_from_meta_meta_annotation('mw brain sensory modalities')
output_names = pymaid.get_annotated('mw brain outputs').name
output_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain outputs').name))
output_skids = [val for sublist in output_skids_list for val in sublist]
#%%
# cascades from each sensory modality
import pickle
p = 0.05
max_hops = 10
n_init = 1000
simultaneous = True
adj=adj_ad
'''
input_hit_hist_list = casc.Cascade_Analyzer.run_cascades_parallel(source_skids_list=input_skids_list, source_names = order, stop_skids=output_skids,
adj=adj_ad, p=p, max_hops=max_hops, n_init=n_init, simultaneous=simultaneous)
pickle.dump(input_hit_hist_list, open('data/cascades/sensory-modality-cascades_1000-n_init.p', 'wb'))
'''
input_hit_hist_list = pickle.load(open('data/cascades/sensory-modality-cascades_1000-n_init.p', 'rb'))
# %%
# plot sensory cascades raw
fig, axs = plt.subplots(len(input_hit_hist_list), 1, figsize=(10, 20))
fig.tight_layout(pad=2.0)
for i, hit_hist in enumerate(input_hit_hist_list):
ax = axs[i]
sns.heatmap(hit_hist.skid_hit_hist, ax=ax)
ax.set_xlabel(hit_hist.get_name())
plt.savefig('cascades/plots/sensory_modality_signals.pdf', format='pdf', bbox_inches='tight')
os.system('say "code executed"')
# %%
# how close are descending neurons to sensory?
# load output types
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
dSEZ = pymaid.get_skids_by_annotation('mw dSEZ')
RGN = pymaid.get_skids_by_annotation('mw RGN')
# generate Cascade_Analyzer objects containing name of pathway and the hit_hist to each output type
dVNC_hits = [casc.Cascade_Analyzer([hit_hist.get_name(), 'dVNC'], hit_hist.skid_hit_hist.loc[dVNC, :]) for hit_hist in input_hit_hist_list]
dSEZ_hits = [casc.Cascade_Analyzer([hit_hist.get_name(), 'dSEZ'], hit_hist.skid_hit_hist.loc[dSEZ, :]) for hit_hist in input_hit_hist_list]
RGN_hits = [casc.Cascade_Analyzer([hit_hist.get_name(), 'RGN'], hit_hist.skid_hit_hist.loc[RGN, :]) for hit_hist in input_hit_hist_list]
# max possible hits that all output neuron types could receive
max_dVNC_hits = len(dVNC_hits[0].skid_hit_hist.index)*n_init
max_dSEZ_hits = len(dSEZ_hits[0].skid_hit_hist.index)*n_init
max_RGN_hits = len(RGN_hits[0].skid_hit_hist.index)*n_init
# organize data so that each sens -> dVNC, dSEZ, RGN is intercalated
sens_output_data = list(zip(dVNC_hits, dSEZ_hits, RGN_hits))
sens_output_data = [x for sublist in sens_output_data for x in sublist]
sens_output_df = pd.DataFrame([x.skid_hit_hist.sum(axis=0) for x in sens_output_data])
# set up multiindex
sens_output_df['source']=[x.get_name()[0] for x in sens_output_data]
sens_output_df['target']=[x.get_name()[1] for x in sens_output_data]
sens_output_df = sens_output_df.set_index(['source', 'target'])
# normalize by max possible input to each output type (num neurons * n_init)
sens_output_df_plot = sens_output_df.copy()
sens_output_df_plot.loc[(slice(None), 'dVNC'), :] = sens_output_df_plot.loc[(slice(None), 'dVNC'), :]/max_dVNC_hits
sens_output_df_plot.loc[(slice(None), 'dSEZ'), :] = sens_output_df_plot.loc[(slice(None), 'dSEZ'), :]/max_dSEZ_hits
sens_output_df_plot.loc[(slice(None), 'RGN'), :] = sens_output_df_plot.loc[(slice(None), 'RGN'), :]/max_RGN_hits
import cmasher as cmr
fig, ax = plt.subplots(1, 1, figsize=(1.5, 2))
fig.tight_layout(pad=3.0)
vmax = 0.35
cmap = cmr.torch
sns.heatmap(sens_output_df_plot, ax = ax, cmap = cmap, vmax=vmax)
ax.set_title('Signal to brain outputs')
ax.set(xlim = (0, 11))
plt.savefig('cascades/plots/sensory_modality_signals_to_output.pdf', format='pdf', bbox_inches='tight')
# determine mean/median hop distance from sens -> output
def counts_to_list(count_list):
expanded_counts = []
for i, count in enumerate(count_list):
expanded = np.repeat(i, count)
expanded_counts.append(expanded)
return([x for sublist in expanded_counts for x in sublist])
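# Example (added): counts_to_list([0, 2, 1]) == [1, 1, 2] --
# two cascade visits at hop 1, one at hop 2.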
all_sens_output_dist = []
for row in sens_output_df.iterrows():
list_hits = counts_to_list(row[1])
all_sens_output_dist.append([row[0][0], row[0][1], np.mean(list_hits), np.median(list_hits)])
all_sens_output_dist = pd.DataFrame(all_sens_output_dist, columns = ['source', 'target', 'mean_hop', 'median_hop'])
# %%
# plotting visits by modality to each descending to VNC neuron pair
# supplemental figure
dVNC_hits_summed = [pd.DataFrame(x.skid_hit_hist.iloc[:, 0:8].sum(axis=1), columns=[x.get_name()[0]]) for x in dVNC_hits]
dVNC_hits_summed = pd.concat(dVNC_hits_summed, axis=1)
dVNC_hits_pairwise = pm.Promat.convert_df_to_pairwise(dVNC_hits_summed)
dSEZ_hits_summed = [pd.DataFrame(x.skid_hit_hist.iloc[:, 0:8].sum(axis=1), columns=[x.get_name()[0]]) for x in dSEZ_hits]
dSEZ_hits_summed = pd.concat(dSEZ_hits_summed, axis=1)
dSEZ_hits_pairwise = pm.Promat.convert_df_to_pairwise(dSEZ_hits_summed)
RGN_hits_summed = [pd.DataFrame(x.skid_hit_hist.iloc[:, 0:8].sum(axis=1), columns=[x.get_name()[0]]) for x in RGN_hits]
RGN_hits_summed = pd.concat(RGN_hits_summed, axis=1)
RGN_hits_pairwise = pm.Promat.convert_df_to_pairwise(RGN_hits_summed)
fig, axs = plt.subplots(
3, 1, figsize=(8, 8)
)
fig.tight_layout(pad=3.0)
ax = axs[0]
ax.get_xaxis().set_visible(False)
ax.set_title('Signal to Individual VNC Descending Neurons')
sns.heatmap(dVNC_hits_pairwise.T, ax = ax)
ax = axs[1]
ax.get_xaxis().set_visible(False)
ax.set_title('Signal to Individual SEZ Descending Neurons')
sns.heatmap(dSEZ_hits_pairwise.T, ax = ax)
ax = axs[2]
ax.set_xlabel('Individual Ring Gland Neurons')
ax.get_xaxis().set_visible(False)
ax.set_title('Signal to Individual Ring Gland Neurons')
sns.heatmap(RGN_hits_pairwise.T, ax = ax)
plt.savefig('cascades/plots/signal_to_individual_outputs.pdf', format='pdf', bbox_inches='tight')
#%%
# alternative clustermap plot of descending neurons
# supplemental figure plot
vmax = n_init
fig = sns.clustermap(dVNC_hits_pairwise.T, row_cluster = False, figsize = (8, 4), vmax=vmax)
ax = fig.ax_heatmap
ax.set_xlabel('Individual dVNCs')
ax.set_xticks([])
fig.savefig('cascades/plots/signal_to_individual_dVNCs.pdf', format='pdf', bbox_inches='tight')
fig = sns.clustermap(dSEZ_hits_pairwise.T, row_cluster = False, figsize = (8, 4), vmax=vmax)
ax = fig.ax_heatmap
ax.set_xlabel('Individual dSEZs')
ax.set_xticks([])
fig.savefig('cascades/plots/signal_to_individual_dSEZs.pdf', format='pdf', bbox_inches='tight')
fig = sns.clustermap(RGN_hits_pairwise.T, row_cluster = False, figsize = (8, 4), vmax=vmax)
ax = fig.ax_heatmap
ax.set_xlabel('Individual RG neurons')
ax.set_xticks([])
fig.savefig('cascades/plots/signal_to_individual_RGs.pdf', format='pdf', bbox_inches='tight')
# %%
# distribution summary of signal to output neurons
dVNC_dist = (dVNC_hits_pairwise.groupby('pair_id').sum()>=n_init).sum(axis=1)
dSEZ_dist = (dSEZ_hits_pairwise.groupby('pair_id').sum()>=n_init).sum(axis=1)
RGN_dist = (RGN_hits_pairwise.groupby('pair_id').sum()>=n_init).sum(axis=1)
dist_data = pd.DataFrame(list(zip(dVNC_dist.values, ['dVNC']*len(dVNC_dist))) + list(zip(dSEZ_dist.values, ['dSEZ']*len(dSEZ_dist))) + list(zip(RGN_dist.values, ['RGN']*len(RGN_dist))),
columns = ['combinations', 'type'])
fig, ax = plt.subplots(1,1, figsize=(4,4))
sns.stripplot(data = dist_data, y = 'combinations', x='type', s=1, ax=ax)
fig.savefig('cascades/plots/signal_to_outputs_dist.pdf', format='pdf', bbox_inches='tight')
fig, ax = plt.subplots(1,1, figsize=(4,4))
sns.histplot(data = dVNC_dist-0.5, ax=ax, bins=len(sens))
fig.savefig('cascades/plots/signal_to_dVNC_dist.pdf', format='pdf', bbox_inches='tight')
fig, ax = plt.subplots(1,1, figsize=(4,4))
sns.histplot(data = dSEZ_dist-0.5, ax=ax, bins=len(sens))
fig.savefig('cascades/plots/signal_to_dSEZ_dist.pdf', format='pdf', bbox_inches='tight')
fig, ax = plt.subplots(1,1, figsize=(4,4))
sns.histplot(data = RGN_dist-0.5, ax=ax, bins=len(sens))
fig.savefig('cascades/plots/signal_to_RGN_dist.pdf', format='pdf', bbox_inches='tight')
# %%
# parallel coordinates plots
from pandas.plotting import parallel_coordinates
linewidth = 0.75
alpha = 0.8
very_low_color = '#D7DF23'
low_color = '#C2DD26'
med_color = '#8DC63F'
high_color = '#00A651'
data = dVNC_hits_pairwise.groupby('pair_id').sum()
very_low = (dVNC_dist<=1)
low = (dVNC_dist>1) & (dVNC_dist<4)
med = (dVNC_dist>=4) & (dVNC_dist<8)
high = dVNC_dist>=8
data['type'] = [0]*len(data.index)
data.loc[high, 'type'] = ['high']*len(data.loc[high, 'type'])
data.loc[med, 'type'] = ['med']*len(data.loc[med, 'type'])
data.loc[low, 'type'] = ['low']*len(data.loc[low, 'type'])
data.loc[very_low, 'type'] = ['very_low']*len(data.loc[very_low, 'type'])
data = data.sort_values(by='type')
fig, ax = plt.subplots(1,1, figsize=(4,4))
parallel_coordinates(data, class_column='type', color = [high_color, med_color, low_color, very_low_color], alpha=alpha, linewidth=linewidth)
fig.savefig('cascades/plots/signal-to-dVNC_parallel-coordinates.pdf', format='pdf', bbox_inches='tight')
data = dSEZ_hits_pairwise.groupby('pair_id').sum()
very_low = (dSEZ_dist<=1)
low = (dSEZ_dist>1) & (dSEZ_dist<4)
med = (dSEZ_dist>=4) & (dSEZ_dist<8)
high = dSEZ_dist>=8
data['type'] = [0]*len(data.index)
data.loc[high, 'type'] = ['high']*len(data.loc[high, 'type'])
data.loc[med, 'type'] = ['med']*len(data.loc[med, 'type'])
data.loc[low, 'type'] = ['low']*len(data.loc[low, 'type'])
data.loc[very_low, 'type'] = ['very_low']*len(data.loc[very_low, 'type'])
data = data.sort_values(by='type')
fig, ax = plt.subplots(1,1, figsize=(4,4))
parallel_coordinates(data, class_column='type', color = [high_color, low_color, med_color, very_low_color], alpha=alpha, linewidth=linewidth)
fig.savefig('cascades/plots/signal-to-dSEZ_parallel-coordinates.pdf', format='pdf', bbox_inches='tight')
data = RGN_hits_pairwise.groupby('pair_id').sum()
very_low = (RGN_dist<=1)
low = (RGN_dist>1) & (RGN_dist<4)
med = (RGN_dist>=4) & (RGN_dist<8)
high = RGN_dist>=8
data['type'] = [0]*len(data.index)
data.loc[high, 'type'] = ['high']*len(data.loc[high, 'type'])
data.loc[med, 'type'] = ['med']*len(data.loc[med, 'type'])
data.loc[low, 'type'] = ['low']*len(data.loc[low, 'type'])
data.loc[very_low, 'type'] = ['very_low']*len(data.loc[very_low, 'type'])
data = data.sort_values(by='type')
fig, ax = plt.subplots(1,1, figsize=(4,4))
parallel_coordinates(data, class_column='type', color = [high_color, low_color, very_low_color, med_color], alpha=alpha, linewidth=linewidth)
fig.savefig('cascades/plots/signal-to-RGN_parallel-coordinates.pdf', format='pdf', bbox_inches='tight')
# %%
# PCA of descending input
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
data = dVNC_hits_pairwise.groupby('pair_id').sum()
data['type'] = ['dVNC']*len(data)
data2 = dSEZ_hits_pairwise.groupby('pair_id').sum()
data2['type'] = ['dSEZ']*len(data2)
data3 = RGN_hits_pairwise.groupby('pair_id').sum()
data3['type'] = ['RGN']*len(data3)
data = pd.concat([data, data2, data3])
x = data.drop(columns='type').values
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['pc1', 'pc2'], index=data.index)
principalDf['type'] = data['type']
ylim = (-2.25, 2.25)
xlim = (-5, 6)
size = 3
alpha = 0.75
# plot dVNC PCA
plot_data = principalDf[principalDf.type=='dVNC']
low = (dVNC_dist<4)
med = (dVNC_dist>=4) & (dVNC_dist<10)
high = dVNC_dist>=10
plot_data.loc[high, 'type'] = ['high']*len(plot_data.loc[high, 'type'])
plot_data.loc[med, 'type'] = ['med']*len(plot_data.loc[med, 'type'])
plot_data.loc[low, 'type'] = ['low']*len(plot_data.loc[low, 'type'])
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.scatterplot(data = plot_data, x='pc1', y='pc2', hue='type', hue_order = ['high', 'med', 'low'], s=size, linewidth=0, alpha=alpha, ax=ax)
ax.set(xlim=xlim, ylim=ylim)
fig.savefig('cascades/plots/signal-to-dVNC_PCA.pdf', format='pdf', bbox_inches='tight')
# plot dSEZ PCA
plot_data = principalDf[principalDf.type=='dSEZ']
low = (dSEZ_dist<4)
med = (dSEZ_dist>=4) & (dSEZ_dist<10)
high = dSEZ_dist>=10
plot_data.loc[high, 'type'] = ['high']*len(plot_data.loc[high, 'type'])
plot_data.loc[med, 'type'] = ['med']*len(plot_data.loc[med, 'type'])
plot_data.loc[low, 'type'] = ['low']*len(plot_data.loc[low, 'type'])
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.scatterplot(data = plot_data, x='pc1', y='pc2', hue='type', hue_order = ['high', 'med', 'low'], s=size, linewidth=0, alpha=alpha, ax=ax)
ax.set(xlim=xlim, ylim=ylim)
fig.savefig('cascades/plots/signal-to-dSEZ_PCA.pdf', format='pdf', bbox_inches='tight')
# plot RGN PCA
plot_data = principalDf[principalDf.type=='RGN']
low = (RGN_dist<4)
med = (RGN_dist>=4) & (RGN_dist<10)
high = RGN_dist>=10
plot_data.loc[high, 'type'] = ['high']*len(plot_data.loc[high, 'type'])
plot_data.loc[med, 'type'] = ['med']*len(plot_data.loc[med, 'type'])
plot_data.loc[low, 'type'] = ['low']*len(plot_data.loc[low, 'type'])
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.scatterplot(data = plot_data, x='pc1', y='pc2', hue='type', hue_order = ['high', 'med', 'low'], s=size, linewidth=0, alpha=alpha, ax=ax)
ax.set(xlim=xlim, ylim=ylim)
fig.savefig('cascades/plots/signal-to-RGN_PCA.pdf', format='pdf', bbox_inches='tight')
# %%
# bar plot of high, med, low categories for each type of output
integration_data = [['dVNC', 'high', sum(dVNC_dist>=10)],
['dVNC', 'med', sum((dVNC_dist>=4) & (dVNC_dist<10))],
['dVNC', 'low', sum(dVNC_dist<4)],
['dSEZ', 'high', sum(dSEZ_dist>=10)],
['dSEZ', 'med', sum((dSEZ_dist>=4) & (dSEZ_dist<10))],
['dSEZ', 'low', sum(dSEZ_dist<4)],
['RGN', 'high', sum(RGN_dist>=10)],
['RGN', 'med', sum((RGN_dist>=4) & (RGN_dist<10))],
['RGN', 'low', sum(RGN_dist<4)]]
integration_data = pd.DataFrame(integration_data, columns = ['class', 'type', 'count'])
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.barplot(data = integration_data, x='class', y='count', hue='type', hue_order = ['high', 'med', 'low'], ax=ax)
fig.savefig('cascades/plots/signal-integration-counts_dVNCs.pdf', format='pdf', bbox_inches='tight')
# %%
##########
# **** Note Well: REALLY old code below, deprecated or never used in paper ****
##########
# %%
# num of descendings at each level
# this assumes that thresholding per node is useful; it might not be
threshold = 50
num_dVNC_dsSens = pd.DataFrame(([np.array(dVNC_ORN_hit>threshold).sum(axis = 0),
np.array(dVNC_AN_hit>threshold).sum(axis = 0),
np.array(dVNC_MN_hit>threshold).sum(axis = 0),
np.array(dVNC_A00c_hit>threshold).sum(axis = 0),
np.array(dVNC_vtd_hit>threshold).sum(axis = 0),
np.array(dVNC_thermo_hit>threshold).sum(axis = 0),
np.array(dVNC_photo_hit>threshold).sum(axis = 0)]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
num_dSEZ_dsSens = pd.DataFrame(([np.array(dSEZ_ORN_hit>threshold).sum(axis = 0),
np.array(dSEZ_AN_hit>threshold).sum(axis = 0),
np.array(dSEZ_MN_hit>threshold).sum(axis = 0),
np.array(dSEZ_A00c_hit>threshold).sum(axis = 0),
np.array(dSEZ_vtd_hit>threshold).sum(axis = 0),
np.array(dSEZ_thermo_hit>threshold).sum(axis = 0),
np.array(dSEZ_photo_hit>threshold).sum(axis = 0)]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
num_RG_dsSens = pd.DataFrame(([np.array(RG_ORN_hit>threshold).sum(axis = 0),
np.array(RG_AN_hit>threshold).sum(axis = 0),
np.array(RG_MN_hit>threshold).sum(axis = 0),
np.array(RG_A00c_hit>threshold).sum(axis = 0),
np.array(RG_vtd_hit>threshold).sum(axis = 0),
np.array(RG_thermo_hit>threshold).sum(axis = 0),
np.array(RG_photo_hit>threshold).sum(axis = 0)]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
fig, axs = plt.subplots(
3, 1, figsize=(8, 8)
)
fig.tight_layout(pad=3.0)
vmax = 50
cmap = cmr.heat
ax = axs[0]
ax.set_title('Number of VNC Descending Neurons downstream of Sensory Signal')
sns.heatmap(num_dVNC_dsSens, ax = ax, vmax = vmax, rasterized=True, cmap = cmap)
ax.set(xlim = (0, 13))
ax = axs[1]
ax.set_title('Number of SEZ Descending Neurons downstream of Sensory Signal')
sns.heatmap(num_dSEZ_dsSens, ax = ax, vmax = vmax, rasterized=True, cmap = cmap)
ax.set(xlim = (0, 13))
ax = axs[2]
ax.set_title('Number of Ring Gland Neurons downstream of Sensory Signal')
sns.heatmap(num_RG_dsSens, ax = ax, vmax = vmax, rasterized=True, cmap = cmap)
ax.set_xlabel('Hops from sensory')
ax.set(xlim = (0, 13))
plt.savefig('cascades/plots/number_outputs_ds_each_sensory_modality.pdf', format='pdf', bbox_inches='tight')
# %%
# When modality are each outputs associated with?
dVNC_hits = pd.DataFrame(([ dVNC_skids,
dVNC_ORN_hit.sum(axis = 1),
dVNC_AN_hit.sum(axis = 1),
dVNC_MN_hit.sum(axis = 1),
dVNC_thermo_hit.sum(axis = 1),
dVNC_photo_hit.sum(axis = 1),
dVNC_A00c_hit.sum(axis = 1),
dVNC_vtd_hit.sum(axis = 1)]),
index = ['dVNC_skid', 'ORN', 'AN', 'MN', 'thermo', 'photo', 'A00c', 'vtd'])
dVNC_hits = dVNC_hits.T
dSEZ_hits = pd.DataFrame(([ dSEZ_skids,
dSEZ_ORN_hit.sum(axis = 1),
dSEZ_AN_hit.sum(axis = 1),
dSEZ_MN_hit.sum(axis = 1),
dSEZ_thermo_hit.sum(axis = 1),
dSEZ_photo_hit.sum(axis = 1),
dSEZ_A00c_hit.sum(axis = 1),
dSEZ_vtd_hit.sum(axis = 1)]),
index = ['dSEZ_skid', 'ORN', 'AN', 'MN', 'thermo', 'photo', 'A00c', 'vtd'])
dSEZ_hits = dSEZ_hits.T
RG_hits = pd.DataFrame(([ RG_skids,
RG_ORN_hit.sum(axis = 1),
RG_AN_hit.sum(axis = 1),
RG_MN_hit.sum(axis = 1),
RG_thermo_hit.sum(axis = 1),
RG_photo_hit.sum(axis = 1),
RG_A00c_hit.sum(axis = 1),
RG_vtd_hit.sum(axis = 1)]),
index = ['RG_skid', 'ORN', 'AN', 'MN', 'thermo', 'photo', 'A00c', 'vtd'])
RG_hits = RG_hits.T
# %%
# sensory characterization of each layer of each sensory modality
import plotly.express as px
from pandas.plotting import parallel_coordinates
# replacement if I want to use this later
#sensory_profiles = [hit_hist.skid_hit_hist.sum(axis=1).values for hit_hist in input_hit_hist_list]
#sensory_profiles = pd.DataFrame(sensory_profiles, index=[hit_hist.get_name() for hit_hist in input_hit_hist_list], columns = input_hit_hist_list[0].skid_hit_hist.index)
modalities = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo']
hit_hist_list = [ORN_hit_hist, AN_hit_hist, MN_hit_hist, A00c_hit_hist,
                 vtd_hit_hist, thermo_hit_hist, photo_hit_hist]
# total visits per skid, summed over all hops (rows = skids, columns = modalities)
sensory_profile = pd.DataFrame([hit_hist.sum(axis = 1) for hit_hist in hit_hist_list],
                               index = modalities).T
# visits per skid at each individual hop (hops 0 through 8)
sensory_profiles_by_hop = [pd.DataFrame([hit_hist[:, hop] for hit_hist in hit_hist_list],
                                        index = modalities).T
                           for hop in range(9)]
(sensory_profile0, sensory_profile1, sensory_profile2,
 sensory_profile3, sensory_profile4, sensory_profile5,
 sensory_profile6, sensory_profile7, sensory_profile8) = sensory_profiles_by_hop
#%%
# multisensory elements per layer (apples to apples)
threshold = 25
# for each modality and hop, find the neurons visited more than `threshold` times
# at that hop, then average the full sensory profiles of those neurons
def layer_profile(hit_hist, name):
    rows = []
    for hop in range(9):
        idx = np.where(hit_hist[:, hop] > threshold)[0]
        rows.append(np.array(sensory_profiles_by_hop[hop].iloc[idx, :].sum(axis = 0)/len(idx)))
    return pd.DataFrame(rows, index = ['%s%i' % (name, hop) for hop in range(9)],
                        columns = modalities)

ORN_profile = layer_profile(ORN_hit_hist, 'ORN')
AN_profile = layer_profile(AN_hit_hist, 'AN')
MN_profile = layer_profile(MN_hit_hist, 'MN')
A00c_profile = layer_profile(A00c_hit_hist, 'A00c')
vtd_profile = layer_profile(vtd_hit_hist, 'vtd')
thermo_profile = layer_profile(thermo_hit_hist, 'thermo')
photo_profile = layer_profile(photo_hit_hist, 'photo')
# %%
# plotting multisensory elements per layer
x_axis_labels = [0,1,2,3,4,5,6]
x_label = 'Hops from Sensory'
fig, axs = plt.subplots(
4, 2, figsize=(5, 8)
)
fig.tight_layout(pad=2.5)
#cbar_ax = axs.add_axes([3, 7, .1, .75])
ax = axs[0, 0]
ax.set_title('Signal from ORN')
ax.set(xticks=[0, 1, 2, 3, 4, 5])
sns.heatmap(ORN_profile.T.iloc[:,0:7], ax = ax, cbar=False, xticklabels = x_axis_labels, rasterized=True)
ax = axs[1, 0]
ax.set_title('Signal from AN')
sns.heatmap(AN_profile.T.iloc[:,0:7], ax = ax, cbar=False, xticklabels = x_axis_labels, rasterized=True)
ax = axs[2, 0]
ax.set_title('Signal from MN')
sns.heatmap(MN_profile.T.iloc[:,0:7], ax = ax, cbar=False, xticklabels = x_axis_labels, rasterized=True)
ax = axs[3, 0]
ax.set_title('Signal from A00c')
sns.heatmap(A00c_profile.T.iloc[:,0:7], ax = ax, cbar=False, xticklabels = x_axis_labels, rasterized=True)
ax.set_xlabel(x_label)
ax = axs[0, 1]
ax.set_title('Signal from vtd')
sns.heatmap(vtd_profile.T.iloc[:,0:7], ax = ax, cbar=False, xticklabels = x_axis_labels, rasterized=True)
ax = axs[1, 1]
ax.set_title('Signal from thermo')
sns.heatmap(thermo_profile.T.iloc[:,0:7], ax = ax, cbar=False, xticklabels = x_axis_labels, rasterized=True)
ax = axs[2, 1]
ax.set_title('Signal from photo')
sns.heatmap(photo_profile.T.iloc[:,0:7], ax = ax, cbar_ax = axs[3, 1], xticklabels = x_axis_labels, rasterized=True)
ax.set_xlabel(x_label)
ax = axs[3, 1]
ax.set_xlabel('Number of Visits\nfrom Sensory Signal')
#ax.axis("off")
plt.savefig('cascades/plots/sensory_integration_per_hop.pdf', format='pdf', bbox_inches='tight')
# %%
# parallel coordinate plot of different sensory layer integration
fig, axs = plt.subplots(
6, 7, figsize=(30, 30), sharey = True
)
fig.tight_layout(pad=2.5)
threshold = 25
alpha = 0.10
#fig.tight_layout(pad=3.0)
# copy before adding the 'class' column so the original profiles are not mutated
parallel_profiles = [profile.copy() for profile in
                     [sensory_profile0, sensory_profile1, sensory_profile2,
                      sensory_profile3, sensory_profile4, sensory_profile5]]
for profile in parallel_profiles:
    profile['class'] = np.zeros(len(profile))
def plot_modality_column(modality_list, column, color):
    for hop, profile in enumerate(parallel_profiles):
        ax = axs[hop, column]
        ax.set(ylim = (0, 100))
        parallel_coordinates(profile.iloc[np.where(modality_list[:, hop] > threshold)[0], :],
                             class_column = 'class', ax = ax, alpha = alpha, color = color)

plot_modality_column(ORN_hit_hist, 0, 'blue')
plot_modality_column(AN_hit_hist, 1, 'orange')
plot_modality_column(MN_hit_hist, 2, 'green')
plot_modality_column(A00c_hit_hist, 3, 'maroon')
plot_modality_column(vtd_hit_hist, 4, 'purple')
plot_modality_column(thermo_hit_hist, 5, 'navy')
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from SpectrogramTools import *
from CQT import *
from NMF import *
from NMFGPU import *
from NMFJoint import *
from SongAnalogies import *
def testNMFJointSynthetic():
np.random.seed(100)
N = 20
M = 100
K = 6
H = np.random.rand(K, M)
W1 = np.random.rand(N, K)
W2 = np.random.rand(N, K)
X1 = W1.dot(H)
X2 = W2.dot(H)
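    # both synthetic views share the same activation matrix H but have different
    # dictionaries W1, W2; the joint factorization should recover a common V near H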
lambdas = [0.01]*2
plotfn = lambda Xs, Us, Vs, VStar, errs: \
plotJointNMFwGT(Xs, Us, Vs, VStar, [W1, W2], [H.T, H.T], errs)
res = doJointNMF([X1, X2], lambdas, K, tol = 0.01, Verbose = True, plotfn = plotfn)
res['X1'] = X1
res['X2'] = X2
sio.savemat("JointNMF.mat", res)
def testNMF1DConvSynthetic():
np.random.seed(100)
N = 20
M = 40
K = 3
L = 80
T = 10
V = 0*np.ones((N, M))
V[5+np.arange(T), np.arange(T)] = 1
V[5+np.arange(T), 5+np.arange(T)] = 0.5
V[15-np.arange(T), 10+np.arange(T)] = 1
V[5+np.arange(T), 20+np.arange(T)] = 1
V[15-np.arange(T), 22+np.arange(T)] = 0.5
V[5+np.arange(T), 10+np.arange(T)] += 0.7
V *= 1000
#doNMF(V, K*T, L, plotfn=plotNMFSpectra)
doNMF1DConv(V, K, T+5, L, plotfn=plotNMF1DConvSpectra)
def testNMF2DConvSynthetic():
initParallelAlgorithms()
np.random.seed(300)
N = 20
M = 40
K = 2
L = 200
T = 10
F = 5
V = 0.1*np.ones((N, M))
V[5+np.arange(T), np.arange(T)] = 1
V[8+np.arange(T), 5+np.arange(T)] = 0.5
V[15-np.arange(T), 10+np.arange(T)] = 1
V[6+np.arange(T), 20+np.arange(T)] = 1
V[10-np.arange(T), 22+np.arange(T)] = 0.5
V[10+np.arange(T), 10+np.arange(T)] += 0.7
doNMF2DConv(V, K, T, F, L, doKL = True, plotfn=plotNMF2DConvSpectra)
#doNMF1DConv(V, K, T, L, plotfn=plotNMF1DConvSpectra)
def get2DSyntheticJointExample():
T = 10
F = 10
K = 3
M = 20
N = 60
W1 = np.zeros((T, M, K))
W2 = np.zeros((T, M, K))
#Pattern 1: A tall block in A that goes to a fat block in A'
[J, I] = np.meshgrid(np.arange(2), 4+np.arange(5))
W1[J.flatten(), I.flatten(), 0] = 1
[J, I] = np.meshgrid(np.arange(5), 7+np.arange(2))
W2[J.flatten(), I.flatten(), 0] = 1
#Pattern 2: An antidiagonal line in A that goes to a diagonal line in A'
W1[np.arange(7), 9-np.arange(7), 1] = 1
W2[np.arange(7), np.arange(7), 1] = 1
#Pattern 3: A square in A that goes into a circle in A'
[J, I] = np.meshgrid(np.arange(5), 10+np.arange(5))
I = I.flatten()
J = J.flatten()
W1[0, np.arange(10), 2] = 1
W1[9, np.arange(10), 2] = 1
W1[np.arange(10), 0, 2] = 1
W1[np.arange(10), 10, 2] = 1
[J, I] = np.meshgrid(np.arange(T), np.arange(T))
I = I.flatten()
J = J.flatten()
idx = np.arange(I.size)
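    # keep only grid points within a thin band around the circle of radius 4
    # centered at (5, 5)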
idx = idx[np.abs((I-5)**2 + (J-5)**2 - 4**2) < 4]
I = I[idx]
J = J[idx]
W2[J, I, 2] = 1
H = np.zeros((F, K, N))
H[9, 0, [3, 15, 50]] = 1
H[0, 0, 27] = 1
#3 diagonal lines in a row, then a gap, then 3 in a row pitch shifted
H[0, 1, [5, 15, 25]] = 1
H[0, 1, [35, 45, 55]] = 1
#Squares and circles moving down then up
H[1, 2, [0, 48]] = 1
H[4, 2, [12, 36]] = 1
H[8, 2, 24] = 1
return {'W1':W1, 'W2':W2, 'H':H, 'T':T, 'F':F, 'K':K, 'M':M, 'N':N}
def testNMF2DConvJointSynthetic():
initParallelAlgorithms()
L = 200
res = get2DSyntheticJointExample()
[W1, W2, H, T, F, K] = \
[res['W1'], res['W2'], res['H'], res['T'], res['F'], res['K']]
A = multiplyConv2D(W1, H)
Ap = multiplyConv2D(W2, H)
doNMF2DConvJointGPU(A, Ap, K, T, F, L, doKL = False, plotfn=plotNMF2DConvSpectraJoint)
#doNMF1DConv(V, K, T, L, plotfn=plotNMF1DConvSpectra)
def testNMF2DConvJoint3WaySynthetic():
initParallelAlgorithms()
np.random.seed(300)
N2 = 40
res = get2DSyntheticJointExample()
[W1, W2, H1, T, F, K] = \
[res['W1'], res['W2'], res['H'], res['T'], res['F'], res['K']]
H2 = np.random.rand(F, K, N2)
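    # threshold the random matrix to get sparse binary activations (~2% ones)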
H2[H2 > 0.98] = 1
H2[H2 < 1] = 0
A = multiplyConv2D(W1, H1)
Ap = multiplyConv2D(W2, H1)
B = multiplyConv2D(W1, H2)
doNMF2DConvJoint3WayGPU(A, Ap, B, K, T, F, 200, plotfn = plotNMF2DConvSpectraJoint3Way)
def outputNMFSounds(U1, U2, winSize, hopSize, Fs, fileprefix):
for k in range(U1.shape[1]):
S1 = np.repeat(U1[:, k][:, None], 60, axis = 1)
X1 = griffinLimInverse(S1, winSize, hopSize)
X1 = X1/np.max(np.abs(X1))
S2 = np.repeat(U2[:, k][:, None], 60, axis = 1)
X2 = griffinLimInverse(S2, winSize, hopSize)
X2 = X2/np.max(np.abs(X2))
X = np.array([X1.flatten(), X2.flatten()]).T
sio.wavfile.write("%s%i.wav"%(fileprefix, k), Fs, X)
def testNMFJointSmoothCriminal():
"""
Trying out the technique in
[1] "Multi-View Clustering via Joint Nonnegative Matrix Factorization"
    Jialu Liu, Chi Wang, Jing Gao, Jiawei Han
"""
Fs, X = sio.wavfile.read("music/SmoothCriminalAligned.wav")
X1 = X[:, 0]/(2.0**15)
X2 = X[:, 1]/(2.0**15)
#Only take first 30 seconds for initial experiments
X1 = X1[0:Fs*30]
X2 = X2[0:Fs*30]
hopSize = 256
winSize = 2048
S1 = np.abs(STFT(X1, winSize, hopSize))
S2 = np.abs(STFT(X2, winSize, hopSize))
lambdas = [1e-4]*2
K = S1.shape[1]*2
plotfn = lambda Xs, Us, Vs, VStar, errs: \
plotJointNMFSpectra(Xs, Us, Vs, VStar, errs, hopSize)
res = doJointNMF([S1, S2], lambdas, K, tol = 0.01, Verbose = True, plotfn = plotfn)
U1 = res['Us'][0]
U2 = res['Us'][1]
V1 = res['Vs'][0]
V2 = res['Vs'][1]
S1Res = U1.dot(V1.T)
S2Res = U2.dot(V2.T)
X1Res = griffinLimInverse(S1Res, winSize, hopSize, NIters = 10)
X2Res = griffinLimInverse(S2Res, winSize, hopSize, NIters = 10)
X1Res = X1Res/np.max(np.abs(X1Res))
X2Res = X2Res/np.max(np.abs(X2Res))
sio.wavfile.write("MJ_%i_%.3g.wav"%(K, lambdas[0]), Fs, X1Res)
sio.wavfile.write("AAF_%i_%.3g.wav"%(K, lambdas[0]), Fs, X2Res)
#outputNMFSounds(U1, U2, winSize, hopSize, Fs, "MAJF")
#sio.savemat("JointNMFSTFT.mat", res)
#Now represent Bad in MJ's basis
import librosa
X, Fs = librosa.load("music/MJBad.mp3")
X = X[0:Fs*30]
S = np.abs(STFT(X, winSize, hopSize))
fn = lambda V, W, H, iter, errs: plotNMFSpectra(V, W, H, iter, errs, hopSize)
NIters = 100
H = doNMF(S, 10, NIters, W=U1, plotfn = fn)
SRes = U1.dot(H)
XRes = griffinLimInverse(SRes, winSize, hopSize, NIters = 10)
SResCover = U2.dot(H)
XResCover = griffinLimInverse(SResCover, winSize, hopSize, NIters = 10)
sio.wavfile.write("BadRes.wav", Fs, XRes)
sio.wavfile.write("BadResCover.wav", Fs, XResCover)
def testNMFMusaicingSimple():
"""
Try to replicate the results from the Driedger paper
"""
import librosa
winSize = 2048
hopSize = 1024
Fs = 22050
X, Fs = librosa.load("music/Bees_Buzzing.mp3")
WComplex = getPitchShiftedSpecs(X, Fs, winSize, hopSize, 6)
W = np.abs(WComplex)
X, Fs = librosa.load("music/Beatles_LetItBe.mp3")
V = np.abs(STFT(X, winSize, hopSize))
#librosa.display.specshow(librosa.amplitude_to_db(H), y_axis = 'log', x_axis = 'time')
fn = lambda V, W, H, iter, errs: plotNMFSpectra(V, W, H, iter, errs, hopSize)
NIters = 50
#(W, H) = doNMF(V, W.shape[1], NIters, W=W, plotfn = fn)
H = doNMFDriedger(V, W, NIters, r=7, p=10, c=6, plotfn=fn)
H = np.array(H, dtype=np.complex)
V2 = WComplex.dot(H)
sio.savemat("V2.mat", {"V2":V2, "H":H})
#V2 = sio.loadmat("V2.mat")["V2"]
X = iSTFT(V2, winSize, hopSize)
X = X/np.max(np.abs(X))
wavfile.write("letitbeeISTFT.wav", Fs, X)
print("Doing phase retrieval...")
Y = griffinLimInverse(V2, winSize, hopSize, NIters=30)
Y = Y/np.max(np.abs(Y))
wavfile.write("letitbee.wav", Fs, Y)
def testHarmPercMusic():
import librosa
from scipy.io import wavfile
import scipy.ndimage
foldername = "HarmPerc"
K = 2
#STFT Params
winSize = 2048
hopSize = 256
if not os.path.exists(foldername):
os.mkdir(foldername)
Fs, X = wavfile.read("music/SmoothCriminalAligned.wav")
X = np.array(X, dtype=np.float32)
A = X[:, 0]/(2.0**15)
Ap = X[:, 1]/(2.0**15)
#Take 20 seconds clips from each
A = A[0:Fs*20]
Ap = Ap[0:Fs*20]
B, Fs = librosa.load("music/MJBad.mp3")
B = B[Fs*3:Fs*23]
#B, Fs = librosa.load("music/MJSpeedDemonClip.wav")
SsA = []
SsAp = []
SsB = []
for (V, Ss, s) in zip([A, Ap, B], [SsA, SsAp, SsB], ["A", "Ap", "B"]):
S = STFT(V, winSize, hopSize)
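        # split into harmonic and percussive components (librosa's median-filter HPSS)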
Harm, Perc = librosa.decompose.hpss(S)
X1 = iSTFT(Harm, winSize, hopSize)
X2 = iSTFT(Perc, winSize, hopSize)
wavfile.write("%s/%s_0.wav"%(foldername, s), Fs, X1)
wavfile.write("%s/%s_1.wav"%(foldername, s), Fs, X2)
if s == "B":
Ss.append(Harm)
Ss.append(Perc)
else:
for Xk in [X1, X2]:
Ss.append(getPitchShiftedSpecs(Xk, Fs, winSize, hopSize))
##Do NMF Driedger on one track at a time
fn = lambda V, W, H, iter, errs: plotNMFSpectra(V, W, H, iter, errs, hopSize)
SFinal = np.zeros(SsB[0].shape, dtype = np.complex)
print("SFinal.shape = ", SFinal.shape)
for k in range(K):
print("Doing Driedger on track %i..."%k)
HFilename = "%s/DriedgerH%i.mat"%(foldername, k)
if not os.path.exists(HFilename):
H = doNMFDriedger(np.abs(SsB[k]), np.abs(SsA[k]), 100, \
r = 7, p = 10, c = 3, plotfn = fn)
sio.savemat(HFilename, {"H":H})
else:
H = sio.loadmat(HFilename)["H"]
H = np.array(H, dtype=np.complex)
S = SsA[k].dot(H)
X = griffinLimInverse(S, winSize, hopSize)
wavfile.write("%s/B%i_Driedger.wav"%(foldername, k), Fs, X)
S = SsAp[k].dot(H)
X = griffinLimInverse(S, winSize, hopSize)
wavfile.write("%s/Bp%i.wav"%(foldername, k), Fs, X)
SFinal += S
##Do Griffin Lim phase correction on the final mixed STFT
X = griffinLimInverse(SFinal, winSize, hopSize)
Y = X/np.max(np.abs(X))
wavfile.write("%s/BpFinal.wav"%foldername, Fs, Y)
def testNMF1DMusic():
import librosa
from scipy.io import wavfile
foldername = "1DNMFResults"
if not os.path.exists(foldername):
os.mkdir(foldername)
NIters = 80
hopSize = 256
winSize = 2048
#Step 1: Do joint embedding on A and Ap
K = 10
T = 16
Fs, X = sio.wavfile.read("music/SmoothCriminalAligned.wav")
X1 = X[:, 0]/(2.0**15)
X2 = X[:, 1]/(2.0**15)
#Only take first 30 seconds for initial experiments
X1 = X1[0:Fs*30]
X2 = X2[0:Fs*30]
#Load in B
B, Fs = librosa.load("music/MJBad.mp3")
B = B[Fs*3:Fs*23]
S1 = STFT(X1, winSize, hopSize)
N = S1.shape[0]
S2 = STFT(X2, winSize, hopSize)
SOrig = np.concatenate((S1, S2), 0)
S = np.abs(SOrig)
plotfn = lambda V, W, H, iter, errs: \
plotNMF1DConvSpectraJoint(V, W, H, iter, errs, hopLength = hopSize, \
audioParams = {'Fs':Fs, 'winSize':winSize, 'prefix':foldername})
filename = "%s/NMFAAp.mat"%foldername
if os.path.exists(filename):
res = sio.loadmat(filename)
[W, H] = [res['W'], res['H']]
else:
(W, H) = doNMF1DConvJoint(S, K, T, NIters, prefix=foldername, plotfn=plotfn)
sio.savemat(filename, {"W":W, "H":H})
W1 = W[:, 0:N, :]
W2 = W[:, N::, :]
S = multiplyConv1D(W, H)
S1 = S[0:N, :]
S2 = S[N::, :]
y_hat = griffinLimInverse(S1, winSize, hopSize)
y_hat = y_hat/np.max(np.abs(y_hat))
sio.wavfile.write("%s/ANMF.wav"%foldername, Fs, y_hat)
y_hat = griffinLimInverse(S2, winSize, hopSize)
y_hat = y_hat/np.max(np.abs(y_hat))
sio.wavfile.write("%s/ApNMF.wav"%foldername, Fs, y_hat)
#Also invert each Wt
for k in range(W.shape[2]):
Wk = np.array(W[:, :, k].T)
Wk1 = Wk[0:N, :]
Wk2 = Wk[N::, :]
y_hat = griffinLimInverse(Wk1, winSize, hopSize)
y_hat = y_hat/np.max(np.abs(y_hat))
sio.wavfile.write("%s/WA_%i.wav"%(foldername, k), Fs, y_hat)
y_hat = griffinLimInverse(Wk2, winSize, hopSize)
y_hat = y_hat/np.max(np.abs(y_hat))
sio.wavfile.write("%s/WAp_%i.wav"%(foldername, k), Fs, y_hat)
S1 = SOrig[0:N, :]
S2 = SOrig[N::, :]
(AllSsA, RatiosA) = getComplexNMF1DTemplates(S1, W1, H, p = 2, audioParams = {'winSize':winSize, \
'hopSize':hopSize, 'Fs':Fs, 'fileprefix':"%s/TrackA"%foldername})
(AllSsAp, RatiosAp) = getComplexNMF1DTemplates(S2, W2, H, p = 2, audioParams = {'winSize':winSize, \
'hopSize':hopSize, 'Fs':Fs, 'fileprefix':"%s/TrackAp"%foldername})
#Step 1a: Combine templates manually
clusters = [[3], [5], [0, 1, 2, 4, 6, 7, 8, 9]]
SsA = []
SsAp = []
for i, cluster in enumerate(clusters):
SAi = np.zeros(S1.shape, dtype = np.complex)
SApi = np.zeros(S2.shape, dtype = np.complex)
for idx in cluster:
SAi += AllSsA[idx]
SApi += AllSsAp[idx]
SsA.append(SAi)
SsAp.append(SApi)
y_hat = griffinLimInverse(SAi, winSize, hopSize)
y_hat = y_hat/np.max(np.abs(y_hat))
wavfile.write("%s/TrackAManual%i.wav"%(foldername, i), Fs, y_hat)
y_hat = griffinLimInverse(SApi, winSize, hopSize)
y_hat = y_hat/np.max(np.abs(y_hat))
wavfile.write("%s/TrackApManual%i.wav"%(foldername, i), Fs, y_hat)
#Step 2: Create a W matrix which is grouped by cluster and which has pitch shifted
#versions of each template
WB = np.array([])
clusteridxs = [0]
for i, cluster in enumerate(clusters):
for idx in cluster:
thisW = W1[:, :, idx].T
for shift in range(-6, 7):
thisWShift = pitchShiftSTFT(thisW, Fs, shift).T[:, :, None]
if WB.size == 0:
WB = thisWShift
else:
WB = np.concatenate((WB, thisWShift), 2)
clusteridxs.append(WB.shape[2])
print("WB.shape = ", WB.shape)
print("clusteridxs = ", clusteridxs)
sio.savemat("%s/WB.mat"%foldername, {"WB":WB})
#Step 3: Filter B by the new W matrix
SBOrig = STFT(B, winSize, hopSize)
plotfn = lambda V, W, H, iter, errs: \
plotNMF1DConvSpectra(V, W, H, iter, errs, hopLength = hopSize)
filename = "%s/NMFB.mat"%foldername
if not os.path.exists(filename):
(WB, HB) = doNMF1DConv(np.abs(SBOrig), WB.shape[2], T, NIters, W = WB)
sio.savemat(filename, {"HB":HB, "WB":WB})
else:
HB = sio.loadmat(filename)["HB"]
#Separate out B tracks
As = []
AsSum = np.zeros(SBOrig.shape)
p = 2
for i in range(len(clusters)):
thisH = np.array(HB)
thisH[0:clusteridxs[i], :] = 0
thisH[clusteridxs[i+1]::, :] = 0
As.append(multiplyConv1D(WB, thisH)**p)
AsSum += As[-1]
SsB = []
for i in range(len(clusters)):
SBi = SBOrig*As[i]/AsSum
SsB.append(SBi)
y_hat = griffinLimInverse(SBi, winSize, hopSize)
y_hat = y_hat/np.max(np.abs(y_hat))
wavfile.write("%s/TrackBManual%i.wav"%(foldername, i), Fs, y_hat)
plt.clf()
plt.plot(np.sum(As[i]**2, 0)/np.sum(AsSum**2, 0))
plt.savefig("%s/TrackBManual%s.svg"%(foldername, i))
#Step 4: Do NMF Driedger on one track of B at a time
NIters = 100
shiftrange = 6
for i in range(len(SsA)):
SsA[i] = getPitchShiftedSpecsFromSpec(SsA[i], Fs, winSize, hopSize, shiftrange=shiftrange)
SsAp[i] = getPitchShiftedSpecsFromSpec(SsAp[i], Fs, winSize, hopSize, shiftrange=shiftrange)
fn = lambda V, W, H, iter, errs: plotNMFSpectra(V, W, H, iter, errs, hopSize)
XFinal = np.array([])
for i in range(len(SsA)):
print("Doing track %i..."%i)
HFilename = "%s/H%i.mat"%(foldername, i)
if not os.path.exists(HFilename):
H = doNMFDriedger(np.abs(SsB[i]), np.abs(SsA[i]), NIters, \
r = 7, p = 10, c = 3, plotfn = fn)
sio.savemat(HFilename, {"H":H})
else:
H = sio.loadmat(HFilename)["H"]
H = np.array(H, dtype=np.complex)
S = SsA[i].dot(H)
X = griffinLimInverse(S, winSize, hopSize)
wavfile.write("%s/B%i_Driedger.wav"%(foldername, i), Fs, X)
S = SsAp[i].dot(H)
X = griffinLimInverse(S, winSize, hopSize)
Y = X/np.max(np.abs(X))
wavfile.write("%s/Bp%i.wav"%(foldername, i), Fs, Y)
if XFinal.size == 0:
XFinal = X
else:
XFinal += X
    Y = XFinal/np.max(np.abs(XFinal))
# ratios.py: simple method for estimating volume change and lake length ratios
#
# OVERVIEW
# this code constructs plots of estimated vs. true subglacial water volume change and
# subglacial lake length over a range of ice thicknesses and oscillation periods.
# the computation is based on a small-perturbation ice-flow model
# --see the supporting information for a full description of the method.
#
# the main parameters that can be set below are:
# (1) the (dimensional) basal friction coefficient (beta_d)
# (2) the subglacial lake length (Ls)
# (3) the spatial component of the lake's basal vertical velocity anomaly (w_base); default is a Gaussian
# (4) the maximum amplitude of the oscillation (amp)
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp2d,interpolate
from scipy import integrate
from scipy.fft import fft,ifft,fftshift,fftfreq
import matplotlib as mpl
from scipy.io import loadmat
import copy
mpl.rcParams['xtick.major.size'] = 4
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['xtick.minor.size'] = 4
mpl.rcParams['xtick.minor.width'] = 1
mpl.rcParams['ytick.major.size'] = 4
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['ytick.minor.size'] = 4
mpl.rcParams['ytick.minor.width'] = 1
# 1.---------------FUNCTIONS FOR VOLUME CHANGE / LENGTH RATIOS------------------
def get_Dj(lamda,beta_nd,w_ft,k):
# function for computing displacements D1 (in-phase with base) and D2 (anti-phase with base)
k = np.abs(k)
g = beta_nd/k
# relaxation function
R1 = (1/k)*((1+g)*np.exp(4*k) - (2+4*g*k)*np.exp(2*k) + 1 -g)
D = (1+g)*np.exp(4*k) + (2*g+4*k+4*g*(k**2))*np.exp(2*k) -1 + g
R = R1/D
# transfer function
T1 = 2*(1+g)*(k+1)*np.exp(3*k) + 2*(1-g)*(k-1)*np.exp(k)
T = T1/D
G1 = T*w_ft
G2 = 1 + (lamda*R)**2
# displacements
D1 = ifft(G1/G2).real
D2 = ifft(lamda*R*G1/G2).real
return D1,D2
def get_Tj(D1,D2,x,H):
# Times where elevation anomaly is maximized (T1) and minimized (T2),
T1 = np.pi - np.arctan(np.mean(D2[np.abs(x)*H/1000<10])/np.mean(D1[np.abs(x)*H/1000<10]))
T2 = 2*np.pi - np.arctan(np.mean(D2[np.abs(x)*H/1000<10])/np.mean(D1[np.abs(x)*H/1000<10]))
return T1,T2
def get_kappaj(T1,T2):
# weights on the displacements:
# kappa1 is the in-phase component
# kappa2 is the anti-phase component
kappa1 = np.cos(T2) - np.cos(T1)
kappa2 = np.sin(T1) - np.sin(T2)
return kappa1,kappa2
def get_ratios(H,t_pd,beta_d,Ls):
# compute ratios of the estimated lake length (dL) and water volume change (dV)
# relative to their true values given the true lake length (Ls),
# dimensional friction (beta_d), and ice thickness (H)
# discretization in frequency domain
N = 2000
x = np.linspace(-100,100,num=N)
d = np.abs(x[1]-x[0])
k = fftfreq(N,d) # frequency
k[0] = 1e-10 # set zero frequency to small number due to (integrable) singularity
k *= 2*np.pi # convert to SciPy's Fourier transform definition (angular
# freq. definition) used in notes
w = w_base(x,Ls/H) # compute basal velocity anomaly
w_ft = fft(w) # fourier transform for numerical method
beta_nd = beta_d*H/(2*eta) # non-dimensional friction parameter
# relative to viscosity/ice thickness
tr = (4*np.pi*eta)/(rho*g*H) # relaxation time
lamda = t_pd/tr # ratio of oscillation time to relaxation time
D1,D2 = get_Dj(lamda,beta_nd,w_ft,k) # compute surface displacements
T1,T2 = get_Tj(D1,D2,x,H) # compute estimated highstand/lowstand times
kappa1,kappa2 = get_kappaj(T1,T2) # compute weights for displacements
dH = kappa1*D1 + kappa2*D2 # compute surface elevation change anomaly
dS = 2*w # elevation change at base
# interpolate displacements for integration
dSi = interpolate.interp1d(x, dS,fill_value="extrapolate")
dHi = interpolate.interp1d(x, dH,fill_value="extrapolate")
dVs = integrate.quad(dSi,-0.5*Ls/H,0.5*Ls/H,full_output=1)[0]
# compute estimated lake length
if np.size(x[np.abs(dH)>delta])>0:
x0 = x[np.abs(dH)>delta]
else:
x0 = 0*x
Lh = 2*np.max(x0) # (problem is symmetric with respect to x)
if Lh > 1e-5:
dVh = integrate.quad(dHi,-0.5*Lh,0.5*Lh,full_output=1)[0]
dV = dVh/dVs
dL = Lh*H/Ls
lag = (2/np.pi)*(np.pi-T1)
else:
dV = 0
dL = 0
lag = 1.01
return dV,dL,lag
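# example call (hypothetical values): ratios for 2 km thick ice, a ~1 yr period,
# and a stiff bed -- note get_ratios relies on the module-level parameters
# (eta, rho, g, delta, w_base) defined in section 2 below:
#   dV_ratio, dL_ratio, lag = get_ratios(H=2000.0, t_pd=3.15e7, beta_d=1e8, Ls=10*1000.0)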
# 2.------------------------- MODEL PARAMETERS ---------------------------------
# function for spatial component of basal vertical velocity anomaly
# default is a Gaussian
def w_base(x,Ls):
sigma = Ls/4 # define standard deviation for Gaussian
w = np.exp(-0.5*(x/sigma)**2)
return w
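# alternative spatial profiles can be swapped in above; for example, a top-hat
# anomaly of the same width would be (hypothetical variant):
#   w = np.where(np.abs(x) <= Ls/2, 1.0, 0.0)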
amp = 0.5 # oscillation amplitude at base (m)
delta = 0.1/amp # dimensionless displacement threshold corresponding
# to dimensional threshold of 0.1 m
eta = 1e13 # viscosity (Pa s)
rho = 917.0 # ice density kg/m^3
g = 9.81 # gravitational acceleration m^2/s
Ls = 10*1000.0                   # lake length (m); equal to 10 km
N_pts = 5 # number of ice thickness and friction
# values (between max and min values from data)
# for constructing minimum lake size function
# (the total number of computations is N_pts**2)
# 3.---COMPUTE VOLUME CHANGE AND LAKE LENGTH RATIOS AS FUNCTIONS OF BETA AND H---
# construct arrays for H and beta_d
H = np.linspace(1000,4000,N_pts)
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles prediction output data from Medical Waveforms experiments.
Contains the PredictionDataService class that grabs and formats prediction data
to be sent to and rendered by the client.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import logging
import numpy as np
from eeg_modelling.eeg_viewer import signal_helper
from eeg_modelling.eeg_viewer import utils
from eeg_modelling.pyprotos import data_pb2
class PredictionDataService(object):
"""Extracts experiment prediction data from a PredictionOutputs proto.
See prediction_output.proto
"""
def __init__(self, prediction_outputs, data_source, max_samples):
self._prediction_outputs = prediction_outputs
self._data_source = data_source
self._abs_start = data_source.GetStartTime()
self._max_samples = max_samples
def _GetChunkTiming(self, prediction_output):
chunk_info = prediction_output.chunk_info
start = utils.TimestampPb2ToSeconds(chunk_info.chunk_start_time)
start = round(start - self._abs_start)
duration = round(chunk_info.chunk_size_sec)
return start, duration
def _PreprocessAttributionData(self, prediction_output):
"""Thresholds and normalizes the attribution in PredictionOutput.
Args:
prediction_output: PredictionOutput Message.
Returns:
PredictionOutput Message with thresholded and normalized attribution
values.
"""
for label in prediction_output.label:
if not label.HasField('attribution_map'):
continue
attribution = label.attribution_map.attribution
# Threshold and normalize over flattened array
      attribution = np.absolute(np.array(attribution))
import unittest
import numpy as np
import sympy
from sympy.abc import r, t, z
import discretize
from discretize import tests
np.random.seed(16)
TOL = 1e-1
# ----------------------------- Test Operators ------------------------------ #
MESHTYPES = ["uniformCylMesh", "randomCylMesh"]
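# helper lambdas: call2/call3 evaluate an analytic function at grid points;
# cylF2/cylF3/cylE3 stack those values into face/edge vector fields on the mesh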
call2 = lambda fun, xyz: fun(xyz[:, 0], xyz[:, 2])
call3 = lambda fun, xyz: fun(xyz[:, 0], xyz[:, 1], xyz[:, 2])
cyl_row2 = lambda g, xfun, yfun: np.c_[call2(xfun, g), call2(yfun, g)]
cyl_row3 = lambda g, xfun, yfun, zfun: np.c_[
call3(xfun, g), call3(yfun, g), call3(zfun, g)
]
cylF2 = lambda M, fx, fy: np.vstack(
(cyl_row2(M.gridFx, fx, fy), cyl_row2(M.gridFz, fx, fy))
)
cylF3 = lambda M, fx, fy, fz: np.vstack(
(
cyl_row3(M.gridFx, fx, fy, fz),
cyl_row3(M.gridFy, fx, fy, fz),
cyl_row3(M.gridFz, fx, fy, fz),
)
)
cylE3 = lambda M, ex, ey, ez: np.vstack(
(
cyl_row3(M.gridEx, ex, ey, ez),
cyl_row3(M.gridEy, ex, ey, ez),
cyl_row3(M.gridEz, ex, ey, ez),
)
)
# class TestCellGradx3D(tests.OrderTest):
# name = "CellGradx"
# MESHTYPES = MESHTYPES
# meshDimension = 3
# meshSizes = [8, 16, 32, 64]
# def getError(self):
# fun = lambda r, t, z: (
# np.sin(2.*np.pi*r) + np.sin(t) + np.sin(2*np.pi*z)
# )
# solR = lambda r, t, z: 2.*np.pi*np.cos(2.*np.pi*r)
# phi = call3(fun, self.M.gridCC)
# phix_num = self.M.cellGradx * phi
# phix_ana = call3(solR, self.M.gridFx)
# err = np.linalg.norm(phix_num - phix_ana, np.inf)
# return err
# def test_order(self):
# self.orderTest()
class TestFaceDiv3D(tests.OrderTest):
name = "FaceDiv"
meshTypes = MESHTYPES
meshDimension = 3
meshSizes = [8, 16, 32, 64]
def getError(self):
funR = lambda r, t, z: np.sin(2.0 * np.pi * r)
funT = lambda r, t, z: r * np.exp(-r) * np.sin(t) # * np.sin(2.*np.pi*r)
funZ = lambda r, t, z: np.sin(2.0 * np.pi * z)
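        # analytic divergence in cylindrical coordinates:
        # div F = (1/r) d(r*Fr)/dr + (1/r) dFt/dt + dFz/dz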
sol = lambda r, t, z: (
(2 * np.pi * r * np.cos(2 * np.pi * r) + np.sin(2 * np.pi * r)) / r
+ np.exp(-r) * np.cos(t)
+ 2 * np.pi * np.cos(2 * np.pi * z)
)
Fc = cylF3(self.M, funR, funT, funZ)
# Fc = np.c_[Fc[:, 0], np.zeros(self.M.nF), Fc[:, 1]]
F = self.M.projectFaceVector(Fc)
divF = self.M.faceDiv.dot(F)
divF_ana = call3(sol, self.M.gridCC)
err = np.linalg.norm((divF - divF_ana), np.inf)
return err
def test_order(self):
self.orderTest()
class TestEdgeCurl3D(tests.OrderTest):
name = "edgeCurl"
meshTypes = MESHTYPES
meshDimension = 3
meshSizes = [8, 16, 32, 64]
def getError(self):
# use the same function in r, t, z
# need to pick functions that make sense at the axis of symmetry
# careful that r, theta contributions make sense at axis of symmetry
funR = lambda r, t, z: np.sin(2 * np.pi * z) * np.sin(np.pi * r) * np.sin(t)
funT = lambda r, t, z: np.cos(np.pi * z) * np.sin(np.pi * r) * np.sin(t)
funZ = lambda r, t, z: np.sin(np.pi * r) * np.sin(t)
        derivR_t = lambda r, t, z: np.sin(2 * np.pi * z) * np.sin(np.pi * r) * np.cos(t)
from __future__ import division
from collections import Iterable, defaultdict
import copy
import os
import pickle
import itertools
from numbers import Integral, Real
from xml.etree import ElementTree as ET
import sys
import numpy as np
from openmc import Mesh, Filter, Trigger, Nuclide
from openmc.cross import CrossScore, CrossNuclide, CrossFilter
from openmc.filter import _FILTER_TYPES
import openmc.checkvalue as cv
from openmc.clean_xml import *
if sys.version_info[0] >= 3:
basestring = str
# "Static" variable for auto-generated Tally IDs
AUTO_TALLY_ID = 10000
def reset_auto_tally_id():
global AUTO_TALLY_ID
AUTO_TALLY_ID = 10000
class Tally(object):
"""A tally defined by a set of scores that are accumulated for a list of
nuclides given a set of filters.
Parameters
----------
tally_id : Integral, optional
Unique identifier for the tally. If none is specified, an identifier
will automatically be assigned
name : str, optional
Name of the tally. If not specified, the name is the empty string.
Attributes
----------
id : Integral
Unique identifier for the tally
name : str
Name of the tally
filters : list of openmc.filter.Filter
List of specified filters for the tally
nuclides : list of openmc.nuclide.Nuclide
List of nuclides to score results for
scores : list of str
List of defined scores, e.g. 'flux', 'fission', etc.
estimator : {'analog', 'tracklength', 'collision'}
Type of estimator for the tally
triggers : list of openmc.trigger.Trigger
List of tally triggers
num_score_bins : Integral
Total number of scores, accounting for the fact that a single
user-specified score, e.g. scatter-P3 or flux-Y2,2, might have multiple
bins
num_scores : Integral
Total number of user-specified scores
num_filter_bins : Integral
Total number of filter bins accounting for all filters
num_bins : Integral
Total number of bins for the tally
num_realizations : Integral
Total number of realizations
with_summary : bool
Whether or not a Summary has been linked
sum : ndarray
An array containing the sum of each independent realization for each bin
sum_sq : ndarray
An array containing the sum of each independent realization squared for
each bin
mean : ndarray
An array containing the sample mean for each bin
std_dev : ndarray
An array containing the sample standard deviation for each bin
"""
def __init__(self, tally_id=None, name=''):
# Initialize Tally class attributes
self.id = tally_id
self.name = name
self._filters = []
self._nuclides = []
self._scores = []
self._estimator = None
self._triggers = []
self._num_score_bins = 0
self._num_realizations = 0
self._with_summary = False
self._sum = None
self._sum_sq = None
self._mean = None
self._std_dev = None
self._with_batch_statistics = False
self._derived = False
self._sp_filename = None
self._results_read = False
def __deepcopy__(self, memo):
existing = memo.get(id(self))
# If this is the first time we have tried to copy this object, create a copy
if existing is None:
clone = type(self).__new__(type(self))
clone.id = self.id
clone.name = self.name
clone.estimator = self.estimator
clone.num_score_bins = self.num_score_bins
clone.num_realizations = self.num_realizations
clone._sum = copy.deepcopy(self._sum, memo)
clone._sum_sq = copy.deepcopy(self._sum_sq, memo)
clone._mean = copy.deepcopy(self._mean, memo)
clone._std_dev = copy.deepcopy(self._std_dev, memo)
clone._with_summary = self.with_summary
clone._with_batch_statistics = self.with_batch_statistics
clone._derived = self.derived
clone._sp_filename = self._sp_filename
clone._results_read = self._results_read
clone._filters = []
for filter in self.filters:
clone.add_filter(copy.deepcopy(filter, memo))
clone._nuclides = []
for nuclide in self.nuclides:
clone.add_nuclide(copy.deepcopy(nuclide, memo))
clone._scores = []
for score in self.scores:
clone.add_score(score)
clone._triggers = []
for trigger in self.triggers:
clone.add_trigger(trigger)
memo[id(self)] = clone
return clone
# If this object has been copied before, return the first copy made
else:
return existing
def __eq__(self, other):
if not isinstance(other, Tally):
return False
# Check all filters
if len(self.filters) != len(other.filters):
return False
for filter in self.filters:
if filter not in other.filters:
return False
# Check all nuclides
if len(self.nuclides) != len(other.nuclides):
return False
for nuclide in self.nuclides:
if nuclide not in other.nuclides:
return False
# Check all scores
if len(self.scores) != len(other.scores):
return False
for score in self.scores:
if score not in other.scores:
return False
if self.estimator != other.estimator:
return False
return True
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(repr(self))
def __repr__(self):
string = 'Tally\n'
string += '{0: <16}{1}{2}\n'.format('\tID', '=\t', self.id)
string += '{0: <16}{1}{2}\n'.format('\tName', '=\t', self.name)
string += '{0: <16}{1}\n'.format('\tFilters', '=\t')
for filter in self.filters:
string += '{0: <16}\t\t{1}\t{2}\n'.format('', filter.type,
filter.bins)
string += '{0: <16}{1}'.format('\tNuclides', '=\t')
for nuclide in self.nuclides:
if isinstance(nuclide, Nuclide):
string += '{0} '.format(nuclide.name)
else:
string += '{0} '.format(nuclide)
string += '\n'
string += '{0: <16}{1}{2}\n'.format('\tScores', '=\t', self.scores)
string += '{0: <16}{1}{2}\n'.format('\tEstimator', '=\t', self.estimator)
return string
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def filters(self):
return self._filters
@property
def nuclides(self):
return self._nuclides
@property
def num_nuclides(self):
return len(self._nuclides)
@property
def scores(self):
return self._scores
@property
def num_scores(self):
return len(self._scores)
@property
def num_score_bins(self):
return self._num_score_bins
@property
def num_filter_bins(self):
num_bins = 1
for filter in self.filters:
num_bins *= filter.num_bins
return num_bins
@property
def num_bins(self):
num_bins = self.num_filter_bins
num_bins *= self.num_nuclides
num_bins *= self.num_score_bins
return num_bins
@property
def estimator(self):
return self._estimator
@property
def triggers(self):
return self._triggers
@property
def num_realizations(self):
return self._num_realizations
@property
def with_summary(self):
return self._with_summary
@property
def sum(self):
if not self._sp_filename:
return None
if not self._results_read:
import h5py
# Open the HDF5 statepoint file
f = h5py.File(self._sp_filename, 'r')
# Extract Tally data from the file
data = f['tallies/tally {0}/results'.format(
self.id)].value
sum = data['sum']
sum_sq = data['sum_sq']
# Define a routine to convert 0 to 1
def nonzero(val):
return 1 if not val else val
# Reshape the results arrays
new_shape = (nonzero(self.num_filter_bins),
nonzero(self.num_nuclides),
nonzero(self.num_score_bins))
sum = np.reshape(sum, new_shape)
sum_sq = np.reshape(sum_sq, new_shape)
# Set the data for this Tally
self._sum = sum
self._sum_sq = sum_sq
# Indicate that Tally results have been read
self._results_read = True
# Close the HDF5 statepoint file
f.close()
return self._sum
@property
def sum_sq(self):
if not self._sp_filename:
return None
if not self._results_read:
# Force reading of sum and sum_sq
self.sum
return self._sum_sq
@property
def mean(self):
if self._mean is None:
if not self._sp_filename:
return None
self._mean = self.sum / self.num_realizations
return self._mean
@property
def std_dev(self):
if self._std_dev is None:
if not self._sp_filename:
return None
n = self.num_realizations
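            # sample standard deviation of the mean over n realizations:
            # sqrt((sum_sq/n - mean^2) / (n - 1)), guarding against zero-mean bins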
nonzero = np.abs(self.mean) > 0
self._std_dev = np.zeros_like(self.mean)
self._std_dev[nonzero] = np.sqrt((self.sum_sq[nonzero]/n -
self.mean[nonzero]**2)/(n - 1))
self.with_batch_statistics = True
return self._std_dev
@property
def with_batch_statistics(self):
return self._with_batch_statistics
@property
def derived(self):
return self._derived
@estimator.setter
def estimator(self, estimator):
cv.check_value('estimator', estimator,
['analog', 'tracklength', 'collision'])
self._estimator = estimator
def add_trigger(self, trigger):
"""Add a tally trigger to the tally
Parameters
----------
trigger : openmc.trigger.Trigger
Trigger to add
"""
if not isinstance(trigger, Trigger):
msg = 'Unable to add a tally trigger for Tally ID="{0}" to ' \
'since "{1}" is not a Trigger'.format(self.id, trigger)
raise ValueError(msg)
if trigger not in self.triggers:
self.triggers.append(trigger)
@id.setter
def id(self, tally_id):
if tally_id is None:
global AUTO_TALLY_ID
self._id = AUTO_TALLY_ID
AUTO_TALLY_ID += 1
else:
cv.check_type('tally ID', tally_id, Integral)
cv.check_greater_than('tally ID', tally_id, 0, equality=True)
self._id = tally_id
@name.setter
def name(self, name):
if name is not None:
cv.check_type('tally name', name, basestring)
self._name = name
else:
self._name = ''
def add_filter(self, filter):
"""Add a filter to the tally
Parameters
----------
filter : openmc.filter.Filter
Filter to add
"""
if not isinstance(filter, (Filter, CrossFilter)):
msg = 'Unable to add Filter "{0}" to Tally ID="{1}" since it is ' \
'not a Filter object'.format(filter, self.id)
raise ValueError(msg)
self._filters.append(filter)
def add_nuclide(self, nuclide):
"""Specify that scores for a particular nuclide should be accumulated
Parameters
----------
nuclide : openmc.nuclide.Nuclide
Nuclide to add
"""
self._nuclides.append(nuclide)
def add_score(self, score):
"""Specify a quantity to be scored
Parameters
----------
score : str
Score to be accumulated, e.g. 'flux'
"""
if not isinstance(score, (basestring, CrossScore)):
msg = 'Unable to add score "{0}" to Tally ID="{1}" since it is ' \
'not a string'.format(score, self.id)
raise ValueError(msg)
# If the score is already in the Tally, don't add it again
if score in self.scores:
return
# Normal score strings
if isinstance(score, basestring):
self._scores.append(score.strip())
# CrossScores
else:
self._scores.append(score)
@num_score_bins.setter
def num_score_bins(self, num_score_bins):
self._num_score_bins = num_score_bins
@num_realizations.setter
def num_realizations(self, num_realizations):
cv.check_type('number of realizations', num_realizations, Integral)
cv.check_greater_than('number of realizations', num_realizations, 0, True)
self._num_realizations = num_realizations
@with_summary.setter
def with_summary(self, with_summary):
cv.check_type('with_summary', with_summary, bool)
self._with_summary = with_summary
@with_batch_statistics.setter
def with_batch_statistics(self, with_batch_statistics):
cv.check_type('with_batch_statistics', with_batch_statistics, bool)
self._with_batch_statistics = with_batch_statistics
@sum.setter
def sum(self, sum):
cv.check_type('sum', sum, Iterable)
self._sum = sum
@sum_sq.setter
def sum_sq(self, sum_sq):
cv.check_type('sum_sq', sum_sq, Iterable)
self._sum_sq = sum_sq
def remove_score(self, score):
"""Remove a score from the tally
Parameters
----------
score : str
Score to remove
"""
if score not in self.scores:
msg = 'Unable to remove score "{0}" from Tally ID="{1}" since ' \
'the Tally does not contain this score'.format(score, self.id)
            raise ValueError(msg)
self._scores.remove(score)
def remove_filter(self, filter):
"""Remove a filter from the tally
Parameters
----------
filter : openmc.filter.Filter
Filter to remove
"""
if filter not in self.filters:
msg = 'Unable to remove filter "{0}" from Tally ID="{1}" since the ' \
'Tally does not contain this filter'.format(filter, self.id)
            raise ValueError(msg)
self._filters.remove(filter)
def remove_nuclide(self, nuclide):
"""Remove a nuclide from the tally
Parameters
----------
nuclide : openmc.nuclide.Nuclide
Nuclide to remove
"""
if nuclide not in self.nuclides:
msg = 'Unable to remove nuclide "{0}" from Tally ID="{1}" since the ' \
'Tally does not contain this nuclide'.format(nuclide, self.id)
            raise ValueError(msg)
self._nuclides.remove(nuclide)
def can_merge(self, tally):
"""Determine if another tally can be merged with this one
Parameters
----------
tally : Tally
Tally to check for merging
"""
if not isinstance(tally, Tally):
return False
# Must have same estimator
if self.estimator != tally.estimator:
return False
# Must have same nuclides
if len(self.nuclides) != len(tally.nuclides):
return False
for nuclide in self.nuclides:
if nuclide not in tally.nuclides:
return False
# Must have same or mergeable filters
if len(self.filters) != len(tally.filters):
return False
# Check if only one tally contains a delayed group filter
tally1_dg = False
for filter1 in self.filters:
if filter1.type == 'delayedgroup':
tally1_dg = True
tally2_dg = False
for filter2 in tally.filters:
if filter2.type == 'delayedgroup':
tally2_dg = True
# Return False if only one tally has a delayed group filter
if (tally1_dg or tally2_dg) and not (tally1_dg and tally2_dg):
return False
# Look to see if all filters are the same, or one or more can be merged
for filter1 in self.filters:
mergeable_filter = False
for filter2 in tally.filters:
if filter1 == filter2 or filter1.can_merge(filter2):
mergeable_filter = True
break
# If no mergeable filter was found, the tallies are not mergeable
if not mergeable_filter:
return False
# Tallies are mergeable if all conditional checks passed
return True
def merge(self, tally):
"""Merge another tally with this one
Parameters
----------
tally : Tally
Tally to merge with this one
Returns
-------
merged_tally : Tally
Merged tallies
"""
if not self.can_merge(tally):
msg = 'Unable to merge tally ID="{0}" with "{1}"'.format(tally.id, self.id)
raise ValueError(msg)
# Create deep copy of tally to return as merged tally
merged_tally = copy.deepcopy(self)
# Differentiate Tally with a new auto-generated Tally ID
merged_tally.id = None
# Merge filters
for i, filter1 in enumerate(merged_tally.filters):
for filter2 in tally.filters:
if filter1 != filter2 and filter1.can_merge(filter2):
merged_filter = filter1.merge(filter2)
merged_tally.filters[i] = merged_filter
break
# Add scores from second tally to merged tally
for score in tally.scores:
merged_tally.add_score(score)
# Add triggers from second tally to merged tally
for trigger in tally.triggers:
merged_tally.add_trigger(trigger)
return merged_tally
def get_tally_xml(self):
"""Return XML representation of the tally
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing tally data
"""
element = ET.Element("tally")
# Tally ID
element.set("id", str(self.id))
# Optional Tally name
if self.name != '':
element.set("name", self.name)
# Optional Tally filters
for filter in self.filters:
subelement = ET.SubElement(element, "filter")
subelement.set("type", str(filter.type))
if filter.bins is not None:
bins = ''
for bin in filter.bins:
bins += '{0} '.format(bin)
subelement.set("bins", bins.rstrip(' '))
# Optional Nuclides
if len(self.nuclides) > 0:
nuclides = ''
for nuclide in self.nuclides:
if isinstance(nuclide, Nuclide):
nuclides += '{0} '.format(nuclide.name)
else:
nuclides += '{0} '.format(nuclide)
subelement = ET.SubElement(element, "nuclides")
subelement.text = nuclides.rstrip(' ')
# Scores
if len(self.scores) == 0:
msg = 'Unable to get XML for Tally ID="{0}" since it does not ' \
'contain any scores'.format(self.id)
raise ValueError(msg)
else:
scores = ''
for score in self.scores:
scores += '{0} '.format(score)
subelement = ET.SubElement(element, "scores")
subelement.text = scores.rstrip(' ')
# Tally estimator type
if self.estimator is not None:
subelement = ET.SubElement(element, "estimator")
subelement.text = self.estimator
# Optional Triggers
for trigger in self.triggers:
trigger.get_trigger_xml(element)
return element
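    # For illustration only (IDs and bins are hypothetical): a tally with one
    # cell filter and a single 'flux' score serializes along these lines:
    #   <tally id="1">
    #     <filter type="cell" bins="10" />
    #     <scores>flux</scores>
    #     <estimator>tracklength</estimator>
    #   </tally>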
def find_filter(self, filter_type):
"""Return a filter in the tally that matches a specified type
Parameters
----------
filter_type : str
Type of the filter, e.g. 'mesh'
Returns
-------
filter : openmc.filter.Filter
Filter from this tally with matching type, or None if no matching
Filter is found
Raises
------
ValueError
If no matching Filter is found
"""
filter = None
# Look through all of this Tally's Filters for the type requested
for test_filter in self.filters:
if test_filter.type == filter_type:
filter = test_filter
break
# If we did not find the Filter, throw an Exception
if filter is None:
msg = 'Unable to find filter type "{0}" in ' \
'Tally ID="{1}"'.format(filter_type, self.id)
raise ValueError(msg)
return filter
def get_filter_index(self, filter_type, filter_bin):
"""Returns the index in the Tally's results array for a Filter bin
Parameters
----------
filter_type : str
The type of Filter (e.g., 'cell', 'energy', etc.)
filter_bin : Integral or tuple
The bin is an integer ID for 'material', 'surface', 'cell',
'cellborn', and 'universe' Filters. The bin is an integer for the
cell instance ID for 'distribcell' Filters. The bin is a 2-tuple of
floats for 'energy' and 'energyout' filters corresponding to the
energy boundaries of the bin of interest. The bin is a (x,y,z)
3-tuple for 'mesh' filters corresponding to the mesh cell of
interest.
Returns
-------
The index in the Tally data array for this filter bin
"""
# Find the equivalent Filter in this Tally's list of Filters
filter = self.find_filter(filter_type)
# Get the index for the requested bin from the Filter and return it
filter_index = filter.get_bin_index(filter_bin)
return filter_index
def get_nuclide_index(self, nuclide):
"""Returns the index in the Tally's results array for a Nuclide bin
Parameters
----------
nuclide : str
The name of the Nuclide (e.g., 'H-1', 'U-238')
Returns
-------
nuclide_index : int
The index in the Tally data array for this nuclide.
Raises
------
KeyError
When the argument passed to the 'nuclide' parameter cannot be found
in the Tally.
"""
nuclide_index = -1
# Look for the user-requested nuclide in all of the Tally's Nuclides
for i, test_nuclide in enumerate(self.nuclides):
# If the Summary was linked, then values are Nuclide objects
if isinstance(test_nuclide, Nuclide):
if test_nuclide._name == nuclide:
nuclide_index = i
break
# If the Summary has not been linked, then values are ZAIDs
else:
if test_nuclide == nuclide:
nuclide_index = i
break
if nuclide_index == -1:
msg = 'Unable to get the nuclide index for Tally since "{0}" ' \
'is not one of the nuclides'.format(nuclide)
raise KeyError(msg)
else:
return nuclide_index
def get_score_index(self, score):
"""Returns the index in the Tally's results array for a score bin
Parameters
----------
score : str
The score string (e.g., 'absorption', 'nu-fission')
Returns
-------
score_index : int
The index in the Tally data array for this score.
Raises
------
ValueError
When the argument passed to the 'score' parameter cannot be found in
the Tally.
"""
try:
score_index = self.scores.index(score)
except ValueError:
msg = 'Unable to get the score index for Tally since "{0}" ' \
'is not one of the scores'.format(score)
raise ValueError(msg)
return score_index
def get_filter_indices(self, filters=[], filter_bins=[]):
"""Get indices into the filter axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the filter
axis of the tally's data array (axis=0) for particular combinations
of filters and their corresponding bins.
Parameters
----------
filters : list of str
A list of filter type strings
(e.g., ['mesh', 'energy']; default is [])
filter_bins : list of Iterables
A list of the filter bins corresponding to the filter_types
parameter (e.g., [(1,), (0., 0.625e-6)]; default is []). Each bin
in the list is the integer ID for 'material', 'surface', 'cell',
'cellborn', and 'universe' Filters. Each bin is an integer for the
cell instance ID for 'distribcell' Filters. Each bin is a 2-tuple of
floats for 'energy' and 'energyout' filters corresponding to the
energy boundaries of the bin of interest. The bin is a (x,y,z)
3-tuple for 'mesh' filters corresponding to the mesh cell of
interest. The order of the bins in the list must correspond to the
filter_types parameter.
Returns
-------
ndarray
A NumPy array of the filter indices
"""
cv.check_iterable_type('filters', filters, basestring)
cv.check_iterable_type('filter_bins', filter_bins, tuple)
# Determine the score indices from any of the requested scores
if filters:
# Initialize empty list of indices for each bin in each Filter
filter_indices = []
# Loop over all of the Tally's Filters
for i, filter in enumerate(self.filters):
user_filter = False
# If a user-requested Filter, get the user-requested bins
for j, test_filter in enumerate(filters):
if filter.type == test_filter:
bins = filter_bins[j]
user_filter = True
break
# If not a user-requested Filter, get all bins
if not user_filter:
                    # Create list of 2- or 3-tuples for mesh cell bins
if filter.type == 'mesh':
dimension = filter.mesh.dimension
xyz = map(lambda x: np.arange(1, x+1), dimension)
bins = list(itertools.product(*xyz))
# Create list of 2-tuples for energy boundary bins
elif filter.type in ['energy', 'energyout']:
bins = []
for k in range(filter.num_bins):
bins.append((filter.bins[k], filter.bins[k+1]))
# Create list of cell instance IDs for distribcell Filters
elif filter.type == 'distribcell':
bins = np.arange(filter.num_bins)
# Create list of IDs for bins for all other filter types
else:
bins = filter.bins
# Initialize a NumPy array for the Filter bin indices
filter_indices.append(np.zeros(len(bins), dtype=np.int))
# Add indices for each bin in this Filter to the list
for j, bin in enumerate(bins):
filter_index = self.get_filter_index(filter.type, bin)
filter_indices[i][j] = filter_index
# Account for stride in each of the previous filters
for indices in filter_indices[:i]:
indices *= filter.num_bins
# Apply outer product sum between all filter bin indices
filter_indices = list(map(sum, itertools.product(*filter_indices)))
# If user did not specify any specific Filters, use them all
else:
filter_indices = np.arange(self.num_filter_bins)
return filter_indices
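    # Index arithmetic, illustrated: for two filters with 2 and 3 bins (the
    # first varying slowest), bin (i, j) maps to flat index i*3 + j. The
    # stride multiplication and outer-product sum above compute exactly this,
    # e.g. bin 1 of the first filter with bin 2 of the second gives 1*3 + 2 = 5.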
def get_nuclide_indices(self, nuclides):
"""Get indices into the nuclide axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the nuclide
axis of the tally's data array (axis=1) for one or more nuclides.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U-235', 'U-238']; default is [])
Returns
-------
ndarray
A NumPy array of the nuclide indices
"""
cv.check_iterable_type('nuclides', nuclides, basestring)
# Determine the score indices from any of the requested scores
if nuclides:
nuclide_indices = np.zeros(len(nuclides), dtype=np.int)
for i, nuclide in enumerate(nuclides):
nuclide_indices[i] = self.get_nuclide_index(nuclide)
# If user did not specify any specific Nuclides, use them all
else:
nuclide_indices = np.arange(self.num_nuclides)
return nuclide_indices
def get_score_indices(self, scores):
"""Get indices into the score axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the score
axis of the tally's data array (axis=2) for one or more scores.
Parameters
----------
scores : list of str
A list of one or more score strings
(e.g., ['absorption', 'nu-fission']; default is [])
Returns
-------
ndarray
A NumPy array of the score indices
"""
cv.check_iterable_type('scores', scores, basestring)
# Determine the score indices from any of the requested scores
if scores:
score_indices = np.zeros(len(scores), dtype=np.int)
for i, score in enumerate(scores):
score_indices[i] = self.get_score_index(score)
# If user did not specify any specific scores, use them all
else:
score_indices = np.arange(self.num_scores)
return score_indices
def get_values(self, scores=[], filters=[], filter_bins=[],
nuclides=[], value='mean'):
"""Returns one or more tallied values given a list of scores, filters,
filter bins and nuclides.
This method constructs a 3D NumPy array for the requested Tally data
indexed by filter bin, nuclide bin, and score index. The method will
order the data in the array as specified in the parameter lists.
Parameters
----------
scores : list of str
A list of one or more score strings
(e.g., ['absorption', 'nu-fission']; default is [])
filters : list of str
A list of filter type strings
(e.g., ['mesh', 'energy']; default is [])
filter_bins : list of Iterables
A list of the filter bins corresponding to the filter_types
parameter (e.g., [(1,), (0., 0.625e-6)]; default is []). Each bin
in the list is the integer ID for 'material', 'surface', 'cell',
'cellborn', and 'universe' Filters. Each bin is an integer for the
cell instance ID for 'distribcell' Filters. Each bin is a 2-tuple of
floats for 'energy' and 'energyout' filters corresponding to the
energy boundaries of the bin of interest. The bin is a (x,y,z)
3-tuple for 'mesh' filters corresponding to the mesh cell of
interest. The order of the bins in the list must correspond to the
filter_types parameter.
nuclides : list of str
A list of nuclide name strings
(e.g., ['U-235', 'U-238']; default is [])
value : str
A string for the type of value to return - 'mean' (default),
'std_dev', 'rel_err', 'sum', or 'sum_sq' are accepted
Returns
-------
float or ndarray
A scalar or NumPy array of the Tally data indexed in the order
each filter, nuclide and score is listed in the parameters.
Raises
------
ValueError
When this method is called before the Tally is populated with data
by the StatePoint.read_results() method. ValueError is also thrown
if the input parameters do not correspond to the Tally's attributes,
e.g., if the score(s) do not match those in the Tally.
"""
# Ensure that StatePoint.read_results() was called first
if (value == 'mean' and self.mean is None) or \
(value == 'std_dev' and self.std_dev is None) or \
(value == 'rel_err' and self.mean is None) or \
(value == 'sum' and self.sum is None) or \
(value == 'sum_sq' and self.sum_sq is None):
msg = 'The Tally ID="{0}" has no data to return. Call the ' \
'StatePoint.read_results() method before using ' \
'Tally.get_values(...)'.format(self.id)
raise ValueError(msg)
# Get filter, nuclide and score indices
filter_indices = self.get_filter_indices(filters, filter_bins)
nuclide_indices = self.get_nuclide_indices(nuclides)
score_indices = self.get_score_indices(scores)
# Construct outer product of all three index types with each other
indices = np.ix_(filter_indices, nuclide_indices, score_indices)
# Return the desired result from Tally
if value == 'mean':
data = self.mean[indices]
elif value == 'std_dev':
data = self.std_dev[indices]
elif value == 'rel_err':
data = self.std_dev[indices] / self.mean[indices]
elif value == 'sum':
data = self.sum[indices]
elif value == 'sum_sq':
data = self.sum_sq[indices]
else:
            msg = 'Unable to return results from Tally ID="{0}" since ' \
                  'the requested value "{1}" is not \'mean\', \'std_dev\', ' \
                  '\'rel_err\', \'sum\', or \'sum_sq\''.format(self.id, value)
raise LookupError(msg)
return data
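    # A hypothetical call (score names and filter bins depend on the tally):
    #   flux = tally.get_values(scores=['flux'], filters=['cell'],
    #                           filter_bins=[(10,)], value='mean')
    # The result is a 3D array indexed as [filter bin, nuclide, score].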
def get_pandas_dataframe(self, filters=True, nuclides=True,
scores=True, summary=None):
"""Build a Pandas DataFrame for the Tally data.
This method constructs a Pandas DataFrame object for the Tally data
with columns annotated by filter, nuclide and score bin information.
This capability has been tested for Pandas >=0.13.1. However, it is
recommended to use v0.16 or newer versions of Pandas since this method
uses the Multi-index Pandas feature.
Parameters
----------
filters : bool
Include columns with filter bin information (default is True).
nuclides : bool
Include columns with nuclide bin information (default is True).
scores : bool
Include columns with score bin information (default is True).
summary : None or Summary
An optional Summary object to be used to construct columns for
distribcell tally filters (default is None). The geometric
information in the Summary object is embedded into a Multi-index
            column with a geometric "path" to each distribcell instance.
NOTE: This option requires the OpenCG Python package.
Returns
-------
pandas.DataFrame
A Pandas DataFrame with each column annotated by filter, nuclide and
score bin information (if these parameters are True), and the mean
and standard deviation of the Tally's data.
Raises
------
KeyError
When this method is called before the Tally is populated with data
by the StatePoint.read_results() method.
ImportError
When Pandas can not be found on the caller's system
"""
# Ensure that StatePoint.read_results() was called first
if self.mean is None or self.std_dev is None:
msg = 'The Tally ID="{0}" has no data to return. Call the ' \
'StatePoint.read_results() method before using ' \
'Tally.get_pandas_dataframe(...)'.format(self.id)
raise KeyError(msg)
# If using Summary, ensure StatePoint.link_with_summary(...) was called
if summary and not self.with_summary:
msg = 'The Tally ID="{0}" has not been linked with the Summary. ' \
'Call the StatePoint.link_with_summary(...) method ' \
'before using Tally.get_pandas_dataframe(...) with ' \
'Summary info'.format(self.id)
raise KeyError(msg)
# Attempt to import Pandas
try:
import pandas as pd
except ImportError:
msg = 'The Pandas Python package must be installed on your system'
raise ImportError(msg)
# Initialize a pandas dataframe for the tally data
df = pd.DataFrame()
# Find the total length of the tally data array
data_size = self.mean.size
# Build DataFrame columns for filters if user requested them
if filters:
# Append each Filter's DataFrame to the overall DataFrame
for filter in self.filters:
filter_df = filter.get_pandas_dataframe(data_size, summary)
df = pd.concat([df, filter_df], axis=1)
# Include DataFrame column for nuclides if user requested it
if nuclides:
nuclides = []
for nuclide in self.nuclides:
# Write Nuclide name if Summary info was linked with StatePoint
if isinstance(nuclide, Nuclide):
nuclides.append(nuclide.name)
else:
nuclides.append(nuclide)
# Tile the nuclide bins into a DataFrame column
nuclides = np.repeat(nuclides, len(self.scores))
tile_factor = data_size / len(nuclides)
df['nuclide'] = np.tile(nuclides, tile_factor)
# Include column for scores if user requested it
if scores:
tile_factor = data_size / len(self.scores)
df['score'] = np.tile(self.scores, tile_factor)
# Append columns with mean, std. dev. for each tally bin
df['mean'] = self.mean.ravel()
df['std. dev.'] = self.std_dev.ravel()
df = df.dropna(axis=1)
# Expand the columns into Pandas MultiIndices for readability
if pd.__version__ >= '0.16':
columns = copy.deepcopy(df.columns.values)
# Convert all elements in columns list to tuples
for i, column in enumerate(columns):
if not isinstance(column, tuple):
columns[i] = (column,)
# Make each tuple the same length
max_len_column = len(max(columns, key=len))
for i, column in enumerate(columns):
delta_len = max_len_column - len(column)
if delta_len > 0:
new_column = list(column)
new_column.extend(['']*delta_len)
columns[i] = tuple(new_column)
# Create and set a MultiIndex for the DataFrame's columns
df.columns = pd.MultiIndex.from_tuples(columns)
return df
def get_reshaped_data(self, value='mean'):
"""Returns an array of tally data with one dimension per filter.
The tally data in OpenMC is stored as a 3D array with the dimensions
corresponding to filters, nuclides and scores. As a result, tally data
can be opaque for a user to directly index (i.e., without use of the
Tally.get_values(...) method) since one must know how to properly use
the number of bins and strides for each filter to index into the first
(filter) dimension.
This builds and returns a reshaped version of the tally data array with
unique dimensions corresponding to each tally filter. For example,
suppose this tally has arrays of data with shape (8,5,5) corresponding
to two filters (2 and 4 bins, respectively), five nuclides and five
        scores. This method will return a version of the data array
        with a new shape of (2,4,5,5) such that the first two dimensions
correspond directly to the two filters with two and four bins.
Parameters
----------
value : str
A string for the type of value to return - 'mean' (default),
'std_dev', 'rel_err', 'sum', or 'sum_sq' are accepted
Returns
-------
ndarray
The tally data array indexed by filters, nuclides and scores.
"""
# Get the 3D array of data in filters, nuclides and scores
data = self.get_values(value=value)
# Build a new array shape with one dimension per filter
new_shape = ()
for filter in self.filters:
new_shape += (filter.num_bins, )
new_shape += (self.num_nuclides,)
new_shape += (self.num_score_bins,)
# Reshape the data with one dimension for each filter
data = np.reshape(data, new_shape)
return data
def export_results(self, filename='tally-results', directory='.',
format='hdf5', append=True):
"""Exports tallly results to an HDF5 or Python pickle binary file.
Parameters
----------
filename : str
The name of the file for the results (default is 'tally-results')
directory : str
The name of the directory for the results (default is '.')
format : str
The format for the exported file - HDF5 ('hdf5', default) and
Python pickle ('pkl') files are supported
append : bool
Whether or not to append the results to the file (default is True)
Raises
------
KeyError
When this method is called before the Tally is populated with data
by the StatePoint.read_results() method.
"""
# Ensure that StatePoint.read_results() was called first
        if (self._sum is None or self._sum_sq is None) and not self.derived:
msg = 'The Tally ID="{0}" has no data to export. Call the ' \
'StatePoint.read_results() method before using ' \
'Tally.export_results(...)'.format(self.id)
raise KeyError(msg)
if not isinstance(filename, basestring):
msg = 'Unable to export the results for Tally ID="{0}" to ' \
'filename="{1}" since it is not a ' \
'string'.format(self.id, filename)
raise ValueError(msg)
elif not isinstance(directory, basestring):
msg = 'Unable to export the results for Tally ID="{0}" to ' \
'directory="{1}" since it is not a ' \
'string'.format(self.id, directory)
raise ValueError(msg)
elif format not in ['hdf5', 'pkl', 'csv']:
msg = 'Unable to export the results for Tally ID="{0}" to format ' \
'"{1}" since it is not supported'.format(self.id, format)
raise ValueError(msg)
elif not isinstance(append, bool):
msg = 'Unable to export the results for Tally ID="{0}" since the ' \
'append parameter is not True/False'.format(self.id, append)
raise ValueError(msg)
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
# HDF5 binary file
if format == 'hdf5':
import h5py
filename = directory + '/' + filename + '.h5'
if append:
tally_results = h5py.File(filename, 'a')
else:
tally_results = h5py.File(filename, 'w')
# Create an HDF5 group within the file for this particular Tally
tally_group = tally_results.create_group('Tally-{0}'.format(self.id))
# Add basic Tally data to the HDF5 group
tally_group.create_dataset('id', data=self.id)
tally_group.create_dataset('name', data=self.name)
tally_group.create_dataset('estimator', data=self.estimator)
tally_group.create_dataset('scores', data=np.array(self.scores))
# Add a string array of the nuclides to the HDF5 group
nuclides = []
for nuclide in self.nuclides:
nuclides.append(nuclide.name)
tally_group.create_dataset('nuclides', data=np.array(nuclides))
# Create an HDF5 sub-group for the Filters
filter_group = tally_group.create_group('filters')
for filter in self.filters:
filter_group.create_dataset(filter.type, data=filter.bins)
# Add all results to the main HDF5 group for the Tally
tally_group.create_dataset('sum', data=self.sum)
tally_group.create_dataset('sum_sq', data=self.sum_sq)
tally_group.create_dataset('mean', data=self.mean)
tally_group.create_dataset('std_dev', data=self.std_dev)
# Close the Tally results HDF5 file
tally_results.close()
# Python pickle binary file
elif format == 'pkl':
# Load the dictionary from the Pickle file
filename = directory + '/' + filename + '.pkl'
if os.path.exists(filename) and append:
                tally_results = pickle.load(open(filename, 'rb'))
else:
tally_results = {}
# Create a nested dictionary within the file for this particular Tally
tally_results['Tally-{0}'.format(self.id)] = {}
tally_group = tally_results['Tally-{0}'.format(self.id)]
# Add basic Tally data to the nested dictionary
tally_group['id'] = self.id
tally_group['name'] = self.name
tally_group['estimator'] = self.estimator
tally_group['scores'] = np.array(self.scores)
# Add a string array of the nuclides to the HDF5 group
nuclides = []
for nuclide in self.nuclides:
nuclides.append(nuclide.name)
tally_group['nuclides'] = np.array(nuclides)
# Create a nested dictionary for the Filters
tally_group['filters'] = {}
filter_group = tally_group['filters']
for filter in self.filters:
filter_group[filter.type] = filter.bins
# Add all results to the main sub-dictionary for the Tally
tally_group['sum'] = self.sum
tally_group['sum_sq'] = self.sum_sq
tally_group['mean'] = self.mean
tally_group['std_dev'] = self.std_dev
# Pickle the Tally results to a file
pickle.dump(tally_results, open(filename, 'wb'))
def _outer_product(self, other, binary_op):
"""Combines filters, scores and nuclides with another tally.
This is a helper method for the tally arithmetic methods. The filters,
scores and nuclides from both tallies are enumerated into all possible
combinations and expressed as CrossFilter, CrossScore and
CrossNuclide objects in the new derived tally.
Parameters
----------
other : Tally
The tally on the right hand side of the outer product
binary_op : {'+', '-', '*', '/', '^'}
The binary operation in the outer product
Returns
-------
Tally
A new Tally that is the outer product with this one.
Raises
------
ValueError
When this method is called before the other tally is populated
with data by the StatePoint.read_results() method.
"""
# Check that results have been read
if not other.derived and other.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{0}" ' \
'since it does not contain any results.'.format(other.id)
raise ValueError(msg)
new_tally = Tally()
new_tally.with_batch_statistics = True
new_tally._derived = True
# Construct a combined derived name from the two tally operands
if self.name != '' and other.name != '':
new_name = '({0} {1} {2})'.format(self.name, binary_op, other.name)
new_tally.name = new_name
# Create copies of self and other tallies to rearrange for tally
# arithmetic
self_copy = copy.deepcopy(self)
other_copy = copy.deepcopy(other)
# Find any shared filters between the two tallies
filter_intersect = []
for filter in self_copy.filters:
if filter in other_copy.filters:
filter_intersect.append(filter)
# Align the shared filters in successive order
for i, filter in enumerate(filter_intersect):
self_index = self_copy.filters.index(filter)
other_index = other_copy.filters.index(filter)
# If necessary, swap self filter
if self_index != i:
self_copy.swap_filters(filter, self_copy.filters[i], inplace=True)
# If necessary, swap other filter
if other_index != i:
other_copy.swap_filters(filter, other_copy.filters[i], inplace=True)
data = self_copy._align_tally_data(other_copy)
if binary_op == '+':
new_tally._mean = data['self']['mean'] + data['other']['mean']
new_tally._std_dev = np.sqrt(data['self']['std. dev.']**2 +
data['other']['std. dev.']**2)
elif binary_op == '-':
new_tally._mean = data['self']['mean'] - data['other']['mean']
new_tally._std_dev = np.sqrt(data['self']['std. dev.']**2 +
data['other']['std. dev.']**2)
elif binary_op == '*':
self_rel_err = data['self']['std. dev.'] / data['self']['mean']
other_rel_err = data['other']['std. dev.'] / data['other']['mean']
new_tally._mean = data['self']['mean'] * data['other']['mean']
new_tally._std_dev = np.abs(new_tally.mean) * \
np.sqrt(self_rel_err**2 + other_rel_err**2)
elif binary_op == '/':
self_rel_err = data['self']['std. dev.'] / data['self']['mean']
other_rel_err = data['other']['std. dev.'] / data['other']['mean']
new_tally._mean = data['self']['mean'] / data['other']['mean']
new_tally._std_dev = np.abs(new_tally.mean) * \
np.sqrt(self_rel_err**2 + other_rel_err**2)
elif binary_op == '^':
mean_ratio = data['other']['mean'] / data['self']['mean']
first_term = mean_ratio * data['self']['std. dev.']
second_term = \
np.log(data['self']['mean']) * data['other']['std. dev.']
new_tally._mean = data['self']['mean'] ** data['other']['mean']
new_tally._std_dev = np.abs(new_tally.mean) * \
np.sqrt(first_term**2 + second_term**2)
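        # The std. dev. expressions above are first-order (delta-method) error
        # propagation for independent operands:
        #   z = x +/- y:  s_z = sqrt(s_x**2 + s_y**2)
        #   z = x*y, x/y: s_z = |z| * sqrt((s_x/x)**2 + (s_y/y)**2)
        #   z = x**y:     s_z = |z| * sqrt((y/x*s_x)**2 + (ln(x)*s_y)**2)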
if self_copy.estimator == other_copy.estimator:
new_tally.estimator = self_copy.estimator
if self_copy.with_summary and other_copy.with_summary:
new_tally.with_summary = self_copy.with_summary
if self_copy.num_realizations == other_copy.num_realizations:
new_tally.num_realizations = self_copy.num_realizations
# If filters are identical, simply reuse them in derived tally
if self_copy.filters == other_copy.filters:
for self_filter in self_copy.filters:
new_tally.add_filter(self_filter)
# Generate filter "outer products" for non-identical filters
else:
# Find the common longest sequence of shared filters
match = 0
for self_filter, other_filter in zip(self_copy.filters, other_copy.filters):
if self_filter == other_filter:
match += 1
else:
break
match_filters = self_copy.filters[:match]
cross_filters = [self_copy.filters[match:], other_copy.filters[match:]]
# Simply reuse shared filters in derived tally
for filter in match_filters:
new_tally.add_filter(filter)
# Use cross filters to combine non-shared filters in derived tally
if len(self_copy.filters) != match and len(other_copy.filters) == match:
for filter in cross_filters[0]:
new_tally.add_filter(filter)
elif len(self_copy.filters) == match and len(other_copy.filters) != match:
for filter in cross_filters[1]:
new_tally.add_filter(filter)
else:
for self_filter, other_filter in itertools.product(*cross_filters):
new_filter = CrossFilter(self_filter, other_filter, binary_op)
new_tally.add_filter(new_filter)
# Generate score "outer products"
if self_copy.scores == other_copy.scores:
new_tally.num_score_bins = self_copy.num_score_bins
for self_score in self_copy.scores:
new_tally.add_score(self_score)
else:
new_tally.num_score_bins = self_copy.num_score_bins * other_copy.num_score_bins
all_scores = [self_copy.scores, other_copy.scores]
for self_score, other_score in itertools.product(*all_scores):
new_score = CrossScore(self_score, other_score, binary_op)
new_tally.add_score(new_score)
# Generate nuclide "outer products"
if self_copy.nuclides == other_copy.nuclides:
for self_nuclide in self_copy.nuclides:
new_tally.nuclides.append(self_nuclide)
else:
all_nuclides = [self_copy.nuclides, other_copy.nuclides]
for self_nuclide, other_nuclide in itertools.product(*all_nuclides):
new_nuclide = CrossNuclide(self_nuclide, other_nuclide, binary_op)
new_tally.add_nuclide(new_nuclide)
# Correct each Filter's stride
stride = new_tally.num_nuclides * new_tally.num_score_bins
for filter in reversed(new_tally.filters):
filter.stride = stride
stride *= filter.num_bins
return new_tally
def _align_tally_data(self, other):
"""Aligns data from two tallies for tally arithmetic.
This is a helper method to construct a dict of dicts of the "aligned"
data arrays from each tally for tally arithmetic. The method analyzes
        the filters, scores and nuclides in both tallies and determines how to
        appropriately align the data for vectorized arithmetic. For example,
        if the two tallies have different filters, this method uses NumPy
        'tile' and 'repeat' operations to build new data arrays such that all
        possible combinations of the data in each tally's bins are formed
        when the arithmetic operation is applied to the arrays.
Parameters
----------
other : Tally
The tally to outer product with this tally
Returns
-------
dict
A dictionary of dictionaries to "aligned" 'mean' and 'std. dev'
NumPy arrays for each tally's data.
"""
self_mean = copy.deepcopy(self.mean)
self_std_dev = copy.deepcopy(self.std_dev)
other_mean = copy.deepcopy(other.mean)
other_std_dev = copy.deepcopy(other.std_dev)
if self.filters != other.filters:
# Determine the number of paired combinations of filter bins
# between the two tallies and repeat arrays along filter axes
diff1 = list(set(self.filters).difference(set(other.filters)))
diff2 = list(set(other.filters).difference(set(self.filters)))
# Determine the factors by which each tally operands' data arrays
# must be tiled or repeated for the tally outer product
other_tile_factor = 1
self_repeat_factor = 1
for filter in diff1:
other_tile_factor *= filter.num_bins
for filter in diff2:
self_repeat_factor *= filter.num_bins
# Tile / repeat the tally data for the tally outer product
self_shape = list(self_mean.shape)
other_shape = list(other_mean.shape)
self_shape[0] *= self_repeat_factor
self_mean = np.repeat(self_mean, self_repeat_factor)
self_std_dev = np.repeat(self_std_dev, self_repeat_factor)
if self_repeat_factor == 1:
other_shape[0] *= other_tile_factor
other_mean = np.repeat(other_mean, other_tile_factor, axis=0)
other_std_dev = np.repeat(other_std_dev, other_tile_factor,
axis=0)
else:
                other_mean = np.tile(other_mean, (other_tile_factor, 1, 1))
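        # Shape bookkeeping for the tiling above (illustrative): if `other`
        # lacks a filter of `self` with k bins, other's (F, N, S) data is
        # tiled along the filter axis to (k*F, N, S), so both operands expose
        # one entry per combined filter-bin combination before arithmetic.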
"""
Leak data, leak distribution properties, and leak objects are created in this module
"""
import pickle
import numpy as np
from os.path import dirname, abspath
import os
# Constants:
g = 9.8 # g is the strength of gravity [m/s^2]
RHO_AIR = 1225 # density of air [g/m^3]
RHO_METHANE = 681 # density of methane at atmospheric pressure [g/m^3]
class Leak:
"""
Stores a list of leaks
"""
def __init__(self, flux=(), leaks_detected=(), capacity=0,
reparable=True, site_index=(), comp_index=(), endtime=np.infty):
"""
Inputs:
flux leak size (g/s)
leaks_detected Binary value to save whether the leak has been detected or not (1 if detected, 0 otherwise)
capacity Expected total number of leaks to be stored in this instance of Leak (allows for faster
extend method)
reparable Indicates whether an emission is reparable leak or a permanent vent
endtime time index when emission will stop if not repaired
"""
if leaks_detected == () and flux != ():
leaks_detected = np.zeros(len(flux))
try:
length_in = len(flux)
except TypeError:
length_in = 1
if capacity == 0:
self.flux = np.array(flux)
self.site_index = np.array(site_index)
self.comp_index = np.array(comp_index)
self.leaks_detected = np.array(leaks_detected) if leaks_detected != () else np.zeros(length_in)
if reparable is True:
self.reparable = np.ones(length_in, dtype=np.bool)
elif reparable is False:
self.reparable = np.zeros(length_in, dtype=np.bool)
else:
self.reparable = reparable
try:
if len(endtime) == length_in:
self.endtime = np.array(endtime)
except TypeError:
            self.endtime = np.ones(length_in) * endtime  # scalar endtime broadcast to every leak (completed from truncated source)
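# Example (hypothetical values; flux in g/s): two reparable leaks at one site,
#   leaks = Leak(flux=[0.5, 1.2], site_index=[0, 0], comp_index=[3, 7])
# A scalar `endtime` is broadcast to every leak via the TypeError branch
# above, while an array of matching length assigns per-leak stop times.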
from ...util import context, call, config_update
import multiprocessing as mp
import numpy as np
import os
import click
def minmax(el, v):
    return (el - np.min(v)) / (np.max(v) - np.min(v)) if np.max(v) != np.min(v) else 0.0  # guard is an assumed completion; original condition truncated
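# Min-max scaling maps min(v) to 0 and max(v) to 1, e.g.
# minmax(3, [1, 3, 5]) == 0.5. The else-guard above is an assumed
# completion for a constant vector; the original condition was truncated.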
import coopihc
from coopihc.space import StateElement, State, StateNotContainedError, Space
import gym
import numpy
import sys
import copy
_str = sys.argv[1]
# -------- Correct assigment
if _str == "correct" or _str == "all":
x = StateElement(
values=None,
spaces=[
coopihc.space.Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
coopihc.space.Space([numpy.array([1, 2, 3], dtype=numpy.int16)]),
coopihc.space.Space(
[numpy.array([-6, -5, -4, -3, -2, -1], dtype=numpy.int16)]
),
],
)
gridsize = (11, 11)
number_of_targets = 3
y = StateElement(
values=None,
spaces=[
Space(
[
numpy.array([i for i in range(gridsize[0])], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
)
for j in range(number_of_targets)
],
clipping_mode="error",
)
x = StateElement(
values=None,
spaces=[
coopihc.space.Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
coopihc.space.Space([numpy.array([1, 2, 3], dtype=numpy.int16)]),
coopihc.space.Space([numpy.array([-6, -5, -4, -3, -2, -1], dtype=numpy.int16)]),
],
)
gridsize = (11, 11)
number_of_targets = 3
y = StateElement(
values=None,
spaces=[
Space(
[
numpy.array([i for i in range(gridsize[0])], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
)
for j in range(number_of_targets)
],
clipping_mode="error",
)
if _str == "action-state":
a = StateElement(values=None, spaces=Space([numpy.array([None], dtype=object)]))
# -------------- accessing values
if _str == "access" or _str == "all":
x["values"]
x["spaces"]
x.values
x["values"] = [
numpy.array([[0.335]]),
numpy.array([[2]]),
numpy.array([[-4]]),
]
x["values"] = [0.2, 2, -5]
y["values"]
y["spaces"]
y["values"] = [
numpy.array([1, 1]),
numpy.array([0, 0]),
numpy.array([2, 2]),
]
y["values"] = [
numpy.array([15, 15]),
numpy.array([0, 0]),
numpy.array([2, 2]),
]
# ------ normal reset
if _str == "reset" or _str == "all":
x.reset()
y.reset()
# -------- forced reset
if _str == "forced-reset" or _str == "all":
reset_dic = {"values": [-1 / 2, 2, -2]}
x.reset(dic=reset_dic)
reset_dic = {"values": [[0, 0], [10, 10], [5, 5]]}
y.reset(dic=reset_dic)
# ------ iterate on StateElement
if _str == "iter" or _str == "all":
for _x in x:
print(_x)
for _y in y:
print(_y)
if _str == "cartesianproduct" or _str == "all":
x.reset()
for n, _x in enumerate(x.cartesian_product()):
# print(n, _x.values)
print(n, _x)
y.reset()
# There are a million elements in y
for n, _y in enumerate(y[0].cartesian_product()):
print(n, _y.values)
if _str == "comp" or _str == "all":
x.reset()
a = x[0]
print(x < numpy.array([2, -2, 4]))
if _str == "len" or _str == "all":
len(x)
len(y)
if _str == "cast" or _str == "all":
y.reset()
targetdomain = StateElement(
values=None,
spaces=[
coopihc.space.Space(
[
-numpy.ones((2, 1), dtype=numpy.float32),
numpy.ones((2, 1), dtype=numpy.float32),
]
)
for j in range(3)
],
)
res = y.cast(targetdomain)
b = StateElement(
values=5,
spaces=coopihc.space.Space(
[numpy.array([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5], dtype=numpy.int16)]
),
)
a = StateElement(
values=0,
spaces=coopihc.space.Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
)
# C2D
continuous = []
discrete = []
    for elem in numpy.linspace(-1, 1, 200):
        pass  # loop body truncated in the source
'''
rotation_coreg_phase_corr.py
Script for performing corregistration (rotation) of two 2D images by using phase correlation \
in polar coordinate system
PhaseCorrDiffEvolutionRotationPolar \
Phase correlation (degree level) in Polar coordinate system with differential evolution (sub-degree level), \
image comparison metric = mean squared error
'''
import numpy as np
from utils.coreg_utils import ImageTranslate, _NormalizeImage, _ApplyHighPassFilter, _ApplyHannWindow, \
_MseMetric, _CartesianToPolar, _DiffEvolutionRotation
def _PhaseCorr(image_ref, image_shifted):
# Determine pixel level shift
Nrows, Ncols = image_ref.shape;
image_ref_fft = np.fft.fft2(image_ref);
image_shifted_fft = np.fft.fft2(image_shifted);
top = np.multiply(np.matrix.conjugate(image_ref_fft), (image_shifted_fft));
    bottom = np.abs(top);
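    # The rest of this routine is truncated in the source. A typical
    # phase-correlation continuation (sketch, not the original code) would
    # normalize the cross-power spectrum and locate the correlation peak:
    #   cross_power = top / bottom;
    #   corr = np.abs(np.fft.ifft2(cross_power));
    #   row_shift, col_shift = np.unravel_index(np.argmax(corr), corr.shape);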
"""Implements the utilities to generate general multi-objective mixed-integer linear program instances
Referenced articles:
@article{mavrotas2005multi,
title={Multi-criteria branch and bound: A vector maximization algorithm for mixed 0-1 multiple objective linear programming},
author={<NAME> and <NAME>},
journal={Applied mathematics and computation},
volume={171},
number={1},
pages={53--71},
year={2005},
publisher={Elsevier}
}
@article{boland2015criterion,
title={A criterion space search algorithm for biobjective mixed integer programming: The triangle splitting method},
author={<NAME> and <NAME> and <NAME>},
journal={INFORMS Journal on Computing},
volume={27},
number={4},
pages={597--618},
year={2015},
publisher={INFORMS}
}
@article{kirlik2014new,
title={A new algorithm for generating all nondominated solutions of multiobjective discrete optimization problems},
author={<NAME> and <NAME>},
journal={European Journal of Operational Research},
volume={232},
number={3},
pages={479--488},
year={2014},
publisher={Elsevier}
}
"""
from abc import ABCMeta, abstractmethod
from gurobipy import GRB, LinExpr, Model
import numpy as np
import os
import pandas as pd
class MomilpInstanceParameterSet:
"""Implements MOMILP instance parameter set"""
def __init__(
self,
constraint_coeff_range=(-1, 20),
continuous_var_obj_coeff_range=(-10, 10),
# if 'True', all the integer variables have zero coefficient in the discrete objectives
dummy_discrete_obj=True,
integer_var_obj_coeff_range=(-200, 200),
# num of binary variables out of the num of integer vars
num_binary_vars=10,
num_constraints=20,
num_continuous_vars=10,
# starting from the objective function at the first index
num_discrete_objs=1,
num_integer_vars=10,
num_objs=3,
obj_sense="max",
rhs_range=(50, 100)):
self.constraint_coeff_range = constraint_coeff_range
self.continuous_var_obj_coeff_range = continuous_var_obj_coeff_range
self.dummy_discrete_obj = dummy_discrete_obj
self.integer_var_obj_coeff_range = integer_var_obj_coeff_range
self.num_binary_vars = num_binary_vars
self.num_constraints = num_constraints
self.num_continuous_vars = num_continuous_vars
self.num_discrete_objs = num_discrete_objs
self.num_integer_vars = num_integer_vars
self.num_objs = num_objs
self.obj_sense = obj_sense
self.rhs_range = rhs_range
def to_dict(self):
"""Returns the dictionary representation of the parameter set"""
return self.__dict__
class MomilpInstance(metaclass=ABCMeta):
"""Implements an abstract MOMILP instance class"""
@abstractmethod
def write(self, path):
"""Writes the model"""
class MomilpInstanceData:
"""Implements a MOMILP instance data"""
def __init__(
self, param_2_value, constraint_coeff_df=None, continuous_var_obj_coeff_df=None,
integer_var_obj_coeff_df=None, rhs=None):
self._constraint_coeff_df = constraint_coeff_df
self._continuous_var_obj_coeff_df = continuous_var_obj_coeff_df
self._integer_var_obj_coeff_df = integer_var_obj_coeff_df
self._param_2_value = param_2_value
self._rhs = rhs
def constraint_coeff_df(self):
"""Returns the constraint coefficient data frame
NOTE: An (m by n) matrix where rows are constraints and columns are variables"""
return self._constraint_coeff_df
def continuous_var_obj_coeff_df(self):
"""Returns the objective functions coefficients data frame for the continuous variables
NOTE: An (m by n) matrix where rows are variables and columns are objective functions"""
return self._continuous_var_obj_coeff_df
def integer_var_obj_coeff_df(self):
"""Returns the objective functions coefficients data frame for the integer variables
NOTE: An (m by n) matrix where rows are variables and columns are objective functions"""
return self._integer_var_obj_coeff_df
def rhs(self):
"""Returns the right-hand-side values of the constraints
NOTE: A series of length m"""
return self._rhs
class KnapsackFileInstanceData(MomilpInstanceData):
"""Implements a Knapsack problem instance data retrived from a file
NOTE: Based on the data input schema defined in Kirlik and Sayin. (2014):
http://home.ku.edu.tr/~moolibrary/
A '.dat' file describing a multi-objective 0-1 knapsack problem
Line 1: Number of objective functions, p
Line 2: Number of objects, n
Line 3: Capacity of the knapsack, W
Line 5: Profits of the objects in each objective function, V
Line 6: Weights of the objects, w
"""
_ESCAPED_CHARACTERS = ["[", "]"]
_LINE_DELIMITER = ", "
_NEW_LINE_SEPARATOR = "\n"
def __init__(self, file_name, param_2_value):
super(KnapsackFileInstanceData, self).__init__(param_2_value)
self._file_name = file_name
self._create()
def _create(self):
"""Creates the instance data"""
lines = []
with open(self._file_name, "r") as f:
lines = f.readlines()
# read the number of objectives
num_objectives = int(self._process_lines(lines).iloc[0,0])
assert num_objectives == self._param_2_value["num_objs"], \
"the number of objectives in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_objectives, self._param_2_value["num_objs"])
# read the number of objects
num_continuous_vars = self._param_2_value["num_continuous_vars"]
assert num_continuous_vars == 0, "there should not be any continuous variables"
num_binary_vars = self._param_2_value["num_binary_vars"]
num_objects = int(self._process_lines(lines).iloc[0,0])
assert num_objects == num_binary_vars, \
"the number of objects in the data file is not equal to the number of binary variables in the " \
"configuration, '%d' != '%d'" % (num_objects, num_continuous_vars + num_binary_vars)
# read the knapsack capacities
self._rhs = self._process_lines(lines).iloc[0, :]
num_constraints = len(self._rhs)
assert num_constraints == self._param_2_value["num_constraints"], \
"the number of constraints in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_constraints, self._param_2_value["num_constraints"])
# read the objective function coefficients
self._continuous_var_obj_coeff_df = pd.DataFrame()
self._integer_var_obj_coeff_df = self._process_lines(lines, to_index=num_objectives).T
# read the constraint coefficients
self._constraint_coeff_df = self._process_lines(lines, to_index=num_constraints)
def _process_lines(self, lines, from_index=0, to_index=1):
"""Processes the lines between the indices, removes the processed lines, and returns the data frame for the
processed data"""
rows = []
for line in lines[from_index:to_index]:
for char in KnapsackFileInstanceData._ESCAPED_CHARACTERS:
line = line.replace(char, "")
line = line.split(KnapsackFileInstanceData._NEW_LINE_SEPARATOR)[0]
values = line.split(KnapsackFileInstanceData._LINE_DELIMITER)
if values[-1][-1] == ",":
values[-1] = values[-1][:-1]
if not values[-1]:
values = values[:-1]
rows.append(values)
del lines[from_index:to_index]
df = pd.DataFrame(rows, dtype='float')
return df
class MomilpFileInstanceData(MomilpInstanceData):
"""Implements a MOMILP instance data retrived from a file
NOTE: Based on the data input schema defined in Boland et al. (2015):
A '.txt' file describing a bi-objective problem
Line 1: Number of constraints, m
Line 2: Number of continuous variables, n_c
Line 3: Number of binary variables, n_b
Line 4: Array of coefficients for the first objective and the continuous variables, c^{1}
Line 5: Array of coefficients for the first objective and the binary variables, f^{1}
Line 6: Array of coefficients for the second objective and the continuous variables, c^{2}
Line 7: Array of coefficients for the second objective and the binary variables, f^{2}
Next 'n_c' lines: Array of constraint matrix coefficients for the continuous variables, a_{i,j}
Next line: Array of constraint matrix coefficients for the binary variables, a^{'}_{j}
Next line: Array of constraint right-hand-side values, b_j
The instance is converted to a three-obj problem by creating an additional objective with all zero coefficients.
"""
_INTEGER_VARIABLE_SUM_CONTRAINT_RHS_MULTIPLIER = 1/3
_LINE_DELIMITER = " "
_NEW_LINE_SEPARATOR = "\n"
def __init__(self, file_name, param_2_value):
super(MomilpFileInstanceData, self).__init__(param_2_value)
self._file_name = file_name
self._create()
def _create(self):
"""Creates the instance data"""
lines = []
with open(self._file_name, "r") as f:
lines = f.readlines()
num_constraints = int(self._process_lines(lines).iloc[0,0])
assert num_constraints == self._param_2_value["num_constraints"], \
"the number of constraints in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_constraints, self._param_2_value["num_constraints"])
num_continuous_vars = int(self._process_lines(lines).iloc[0,0])
assert num_continuous_vars == self._param_2_value["num_continuous_vars"], \
"the number of continuous vars in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_continuous_vars, self._param_2_value["num_continuous_vars"])
num_binary_vars = int(self._process_lines(lines).iloc[0,0])
assert num_binary_vars == self._param_2_value["num_binary_vars"], \
"the number of binary vars in the data file is not equal to the configuration parameter value, " \
"'%d' != '%d'" % (num_binary_vars, self._param_2_value["num_binary_vars"])
# since we solve the BOMILP as TOMILP in the momilp solver, and the default discrete obj index is zero, we
# create zero arrays as the coefficient vectors for the first objective
self._continuous_var_obj_coeff_df = pd.DataFrame(np.zeros(shape=(1, num_continuous_vars)))
self._integer_var_obj_coeff_df = pd.DataFrame(np.zeros(shape=(1, num_binary_vars)))
self._continuous_var_obj_coeff_df = self._continuous_var_obj_coeff_df.append(self._process_lines(lines))
self._integer_var_obj_coeff_df = self._integer_var_obj_coeff_df.append(self._process_lines(lines))
self._continuous_var_obj_coeff_df = self._continuous_var_obj_coeff_df.append(
self._process_lines(lines)).reset_index(drop=True).T
self._integer_var_obj_coeff_df = self._integer_var_obj_coeff_df.append(
self._process_lines(lines)).reset_index(drop=True).T
continuous_var_columns = [i for i in range(num_continuous_vars)]
binary_var_columns = [len(continuous_var_columns) + i for i in range(num_binary_vars)]
continuous_var_constraint_df = self._process_lines(lines, to_index=num_continuous_vars).T
continuous_var_constraint_df = continuous_var_constraint_df.append(
pd.DataFrame(np.zeros(shape=(1, num_continuous_vars)))).reset_index(drop=True)
continuous_var_constraint_df.columns = continuous_var_columns
binary_var_constraint_df = pd.DataFrame(np.diag(self._process_lines(lines).iloc[0,:])).append(
pd.DataFrame(np.zeros(shape=(num_constraints - num_binary_vars - 1, num_binary_vars)))).append(
pd.DataFrame(np.ones(shape=(1, num_binary_vars)))).reset_index(drop=True)
binary_var_constraint_df.columns = binary_var_columns
self._constraint_coeff_df = pd.concat([continuous_var_constraint_df, binary_var_constraint_df], axis=1)
binary_var_sum_rhs = num_binary_vars * MomilpFileInstanceData._INTEGER_VARIABLE_SUM_CONTRAINT_RHS_MULTIPLIER
self._rhs = self._process_lines(lines).iloc[0, :].append(pd.Series(binary_var_sum_rhs)).reset_index(drop=True)
def _process_lines(self, lines, from_index=0, to_index=1):
"""Processes the lines between the indices, removes the processed lines, and returns the data frame for the
processed data"""
rows = []
for line in lines[from_index:to_index]:
line = line.split(MomilpFileInstanceData._NEW_LINE_SEPARATOR)[0]
values = line.split(MomilpFileInstanceData._LINE_DELIMITER)
if not values[-1]:
values = values[:-1]
rows.append(values)
del lines[from_index:to_index]
df = pd.DataFrame(rows, dtype='float')
return df
def constraint_coeff_df(self):
"""Returns the constraint coefficient data frame
NOTE: An (m by n) matrix where rows are constraints and columns are variables"""
return self._constraint_coeff_df
def continuous_var_obj_coeff_df(self):
"""Returns the objective functions coefficients data frame for the continuous variables
NOTE: An (m by n) matrix where rows are variables and columns are objective functions"""
return self._continuous_var_obj_coeff_df
def integer_var_obj_coeff_df(self):
"""Returns the objective functions coefficients data frame for the integer variables
NOTE: An (m by n) matrix where rows are variables and columns are objective functions"""
return self._integer_var_obj_coeff_df
def rhs(self):
"""Returns the right-hand-side values of the constraints
NOTE: A series of length m"""
return self._rhs
class MomilpRandomInstanceData(MomilpInstanceData):
"""Implements a MOMILP random instance data
NOTE: Based on the data generation schema defined in Mavrotas and Diakoulaki (2005) and Boland et al. (2015)"""
_INTEGER_VARIABLE_SUM_CONTRAINT_RHS_MULTIPLIER = 1/3
def __init__(self, param_2_value, np_rand_num_generator_seed=0):
        np.random.seed(np_rand_num_generator_seed)
#!/usr/bin/python
"""calculate_skystats.py -- Mask stars aggressively, together with input mask, determine sky background stats in remaining pixels. Stats are
Average
Median
Standard deviation of x by x pixels's average values
(y boxes are placed randomly)
Note that when using aplpy to plot the locations of randomly placed sky areas, the vmin and vmax for the fits image are currently hard coded, so if you get black png files, that's why. Randomly placed boxes take vmin and vmax command line options, but annuli do not. A remove-created-files option should also be written in. This needs updating to be an importable module.
Required input
fitsimage - image for which background stats are desired.
Usage:
calculate_skystats.py [-h] [-v] [-b STRING] [-a STRING] [--annulusallover STRING] [-n INT] [-m FILE] [-s SEXLOC] [--vmin FLOAT] [--vmax FLOAT] <fitsimage>
Options:
-h, --help Print this screen.
-v, --verbose Print extra information [default: False]
-b STRING, --box STRING Input box parameters: size of box (pixels), number of random boxes to be placed. E.g. 8,1000.
-a STRING, --annulus STRING Select annulus with params 'xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2', angle in degrees, counter clockwise rotation; place random annuli around galaxy.
--annulusallover STRING Select annulus with params 'xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2', angle in degrees, counter clockwise rotation; place annulus around galaxy, but let it move around a little more than above option.
-n INT, --niterations INT Input number of random annuli to be placed. (not used for boxes). [default: 100]
-m FILE, --mask FILE Required. Input mask to be combined with grown sextractor mask. Important to input a mask that masks out the galaxy and its extended low surface brightness features!
-s SEXLOC, --sex SEXLOC SExtractor location [default: /opt/local/bin/sex]
--vmin FLOAT For plotting up box/ annuli locations [default: 4.25]
--vmax FLOAT For plotting up box/ annuli locations [default: 4.32]
Example:
python calculate_skystats.py -v fitsimage.fits
"""
import docopt
import numpy as np
import astropy.io.fits as fits
from scipy import ndimage
import subprocess
import os, sys, copy
import matplotlib.pyplot as plt
import aplpy
sextractor_params = """NUMBER
FLUX_AUTO
FLUXERR_AUTO
FLUX_APER
FLUXERR_APER
X_IMAGE
Y_IMAGE
X_WORLD
Y_WORLD
FLUX_RADIUS
FLAGS
CLASS_STAR
BACKGROUND
ELLIPTICITY
FWHM_IMAGE
"""
sextractor_config = """
ANALYSIS_THRESH 3
BACK_FILTERSIZE 3
BACKPHOTO_TYPE LOCAL
BACK_SIZE 32
CATALOG_NAME test.cat
CATALOG_TYPE ASCII_HEAD
CHECKIMAGE_TYPE SEGMENTATION
CHECKIMAGE_NAME {check_name}
CLEAN Y
CLEAN_PARAM 1.
DEBLEND_MINCONT 0.001
DEBLEND_NTHRESH 32
DETECT_MINAREA 5
DETECT_THRESH 3
DETECT_TYPE CCD
FILTER Y
FILTER_NAME {filter_name}
FLAG_IMAGE flag.fits
GAIN 1.0
MAG_GAMMA 4.
MAG_ZEROPOINT 0.0
MASK_TYPE CORRECT
MEMORY_BUFSIZE 1024
MEMORY_OBJSTACK 3000
MEMORY_PIXSTACK 300000
PARAMETERS_NAME {parameters_name}
PHOT_APERTURES 5
PHOT_AUTOPARAMS 2.5, 3.5
PIXEL_SCALE 2.85
SATUR_LEVEL 50000.
SEEING_FWHM 2.5
STARNNW_NAME {starnnw_name}
VERBOSE_TYPE {verbose_type}
"""
default_conv = """CONV NORM
# 3x3 ``all-ground'' convolution mask with FWHM = 2 pixels.
1 2 1
2 4 2
1 2 1
"""
default_nnw = """NNW
# Neural Network Weights for the SExtractor star/galaxy classifier (V1.3)
# inputs: 9 for profile parameters + 1 for seeing.
# outputs: ``Stellarity index'' (0.0 to 1.0)
# Seeing FWHM range: from 0.025 to 5.5'' (images must have 1.5 < FWHM < 5 pixels)
# Optimized for Moffat profiles with 2<= beta <= 4.
3 10 10 1
-1.56604e+00 -2.48265e+00 -1.44564e+00 -1.24675e+00 -9.44913e-01 -5.22453e-01 4.61342e-02 8.31957e-01 2.15505e+00 2.64769e-01
3.03477e+00 2.69561e+00 3.16188e+00 3.34497e+00 3.51885e+00 3.65570e+00 3.74856e+00 3.84541e+00 4.22811e+00 3.27734e+00
-3.22480e-01 -2.12804e+00 6.50750e-01 -1.11242e+00 -1.40683e+00 -1.55944e+00 -1.84558e+00 -1.18946e-01 5.52395e-01 -4.36564e-01 -5.30052e+00
4.62594e-01 -3.29127e+00 1.10950e+00 -6.01857e-01 1.29492e-01 1.42290e+00 2.90741e+00 2.44058e+00 -9.19118e-01 8.42851e-01 -4.69824e+00
-2.57424e+00 8.96469e-01 8.34775e-01 2.18845e+00 2.46526e+00 8.60878e-02 -6.88080e-01 -1.33623e-02 9.30403e-02 1.64942e+00 -1.01231e+00
4.81041e+00 1.53747e+00 -1.12216e+00 -3.16008e+00 -1.67404e+00 -1.75767e+00 -1.29310e+00 5.59549e-01 8.08468e-01 -1.01592e-02 -7.54052e+00
1.01933e+01 -2.09484e+01 -1.07426e+00 9.87912e-01 6.05210e-01 -6.04535e-02 -5.87826e-01 -7.94117e-01 -4.89190e-01 -8.12710e-02 -2.07067e+01
-5.31793e+00 7.94240e+00 -4.64165e+00 -4.37436e+00 -1.55417e+00 7.54368e-01 1.09608e+00 1.45967e+00 1.62946e+00 -1.01301e+00 1.13514e-01
2.20336e-01 1.70056e+00 -5.20105e-01 -4.28330e-01 1.57258e-03 -3.36502e-01 -8.18568e-02 -7.16163e+00 8.23195e+00 -1.71561e-02 -1.13749e+01
3.75075e+00 7.25399e+00 -1.75325e+00 -2.68814e+00 -3.71128e+00 -4.62933e+00 -2.13747e+00 -1.89186e-01 1.29122e+00 -7.49380e-01 6.71712e-01
-8.41923e-01 4.64997e+00 5.65808e-01 -3.08277e-01 -1.01687e+00 1.73127e-01 -8.92130e-01 1.89044e+00 -2.75543e-01 -7.72828e-01 5.36745e-01
-3.65598e+00 7.56997e+00 -3.76373e+00 -1.74542e+00 -1.37540e-01 -5.55400e-01 -1.59195e-01 1.27910e-01 1.91906e+00 1.42119e+00 -4.35502e+00
-1.70059e+00 -3.65695e+00 1.22367e+00 -5.74367e-01 -3.29571e+00 2.46316e+00 5.22353e+00 2.42038e+00 1.22919e+00 -9.22250e-01 -2.32028e+00
0.00000e+00
1.00000e+00
"""
def run_SExtractor(image_name):
'Create temporary directory'
if not os.path.exists('tmpSkystats'):
os.makedirs('tmpSkystats')
else:
print('./tmpSkystats directory existed already')
'Names of required config files'
sextractor_config_name = './tmpSkystats/scamp.sex'
params_name = './tmpSkystats/scamp.param'
conv_name = './tmpSkystats/default.conv'
nnw_name = './tmpSkystats/default.nnw'
catalog_name = image_name.split('.fits')[0]+'_bkg.cat'
check_name = image_name.split('.fits')[0]+'_bkg_segmap.fits'
if verbose:
verbose_type = 'NORMAL'
else:
verbose_type = 'QUIET'
'Stick content in config files'
configs = zip([sextractor_config_name,params_name,conv_name,nnw_name],[sextractor_config,sextractor_params,default_conv,default_nnw])
for fname,fcontent in configs:
fout = open(fname,'w')
if 'scamp.sex' in fname:
fout.write(fcontent.format(filter_name=conv_name,
parameters_name=params_name,starnnw_name=nnw_name,
verbose_type=verbose_type,check_name=check_name))
else:
fout.write(fcontent)
fout.close()
if verbose:
print('SExtracting...')
'SExtractor command'
command = sexloc + ' -c {config} -CATALOG_NAME {catalog} {image}'.format(config=sextractor_config_name,catalog=catalog_name,image=image_name)
if verbose:
print('Running this command:')
print(command+'\n')
'Run SExtractor'
subprocess.call(command,shell=True)
'Clear unnecessary files'
for fname in [sextractor_config_name,params_name,conv_name,nnw_name]:
clearit(fname)
'Remove temp directory if its not empty'
try:
os.rmdir('tmpSkystats')
except OSError as ex:
if ex.errno == errno.ENOTEMPTY:
print("directory not empty")
return check_name
def clearit(fname):
if os.path.isfile(fname):
os.remove(fname)
return None
def writeFITS(im,saveAs,header=None):
if header is not None:
hdu = fits.PrimaryHDU(data=im,header=header)
else:
hdu = fits.PrimaryHDU(data=im)
hdulist = fits.HDUList([hdu])
hdulist.writeto(saveAs,overwrite=True)
hdulist.close()
return None
def calculate_sky_box(fitsimage,image,total_mask,boxsize_pix,nboxes,vmin=4.25,vmax=4.32):
'''Place nboxes boxsize_pix sized boxes randomly in image with total_mask, calculate average in each box and standard deviation of averages'''
sky_counts = []
pix_counts = []
n_counter = 0
n_notfinite = 0
# Read in image size and set boxes
# to be placed not too near edge
h = fits.getheader(fitsimage)
xmin = 1.5*boxsize_pix
ymin = 1.5*boxsize_pix
xmax = float(h['NAXIS1'])-1.5*boxsize_pix
ymax = float(h['NAXIS2'])-1.5*boxsize_pix
# Start figure to plot up box locations
fig = plt.figure(figsize=(48, 36))
f1 = aplpy.FITSFigure(fitsimage,figure=fig)
#f1.set_tick_labels_font(size='xx-small')
f1.ticks.hide()
f1.tick_labels.hide_x()
f1.tick_labels.hide_y()
f1.axis_labels.hide()
f1.show_grayscale(invert=True, stretch='linear', vmin=vmin, vmax=vmax)
xlen = int(h['NAXIS2'])
ylen = int(h['NAXIS1'])
xtomesh = np.arange(0, ylen, 1)
ytomesh = np.arange(0, xlen, 1)
X, Y = np.meshgrid(xtomesh, ytomesh)
while n_counter < nboxes:
# Choose a random spot
row = np.random.randint(low=ymin,high=ymax)
col = np.random.randint(low=xmin,high=xmax)
# Make a box
image_box = image[row-int(boxsize_pix/2):row+int(boxsize_pix/2)+1,col-int(boxsize_pix/2):col+int(boxsize_pix/2)+1]
mask_box = total_mask[row-int(boxsize_pix/2):row+int(boxsize_pix/2)+1,col-int(boxsize_pix/2):col+int(boxsize_pix/2)+1]
# Plot up location of box for display using show_contour
display_mask = np.zeros((xlen,ylen))
display_mask[row-int(boxsize_pix/2):row+int(boxsize_pix/2)+1,col-int(boxsize_pix/2):col+int(boxsize_pix/2)+1] = 1.0
CS = plt.contour(X, Y, display_mask,linewidths=1.0,alpha=0.1,colors='red')
# Measure average counts in this masked box
counts = np.ma.mean(np.ma.masked_array(image_box,mask=mask_box))
# Measure number of pixels not masked in this masked box
# (mask value 1 marks a masked pixel, so count the zeros)
no_pixels_notmasked = np.sum(mask_box == 0)
# Add average to sky_counts if finite
# Also increment box count
# Else increment n_notfinite
if np.isfinite(counts):
sky_counts.append(counts)
pix_counts.append(no_pixels_notmasked)
n_counter += 1
else:
n_notfinite += 1
# Save figure of box locations
outname = './skyregionlocs.png'
f1.save(outname)
print(' ')
print('***OUTPUT: Box location plot saved here: ',outname)
return sky_counts, pix_counts, n_notfinite, h
def read_annulusparams(annulusparams):
'''Read out annulus parameters of form xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2'''
params = annulusparams.split(',')
xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2 = params
return float(xc1),float(yc1),float(a1),float(b1),float(ang1),float(xc2),float(yc2),float(a2),float(b2),float(ang2)
def make_annulus_mask(xlen,ylen,xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2):
'''Read in annulus parameters and create grabber of annulus (1 inside and 0 outside)'''
ang1_rad = (ang1/360.)*2*np.pi
ang2_rad = (ang2/360.)*2*np.pi
# Ellipse 1
mask1 = np.zeros((xlen,ylen))
xv,yv = np.meshgrid(np.linspace(0,xlen-1,xlen),np.linspace(0,ylen-1,ylen))
A = ( (xv-xc1)*np.cos(ang1_rad) + (yv-yc1)*np.sin(ang1_rad) )**2 / a1**2
B = ( (xv-xc1)*np.sin(ang1_rad) - (yv-yc1)*np.cos(ang1_rad) )**2 / b1**2
xi,yi = np.where( A+B < 1.0 )
mask1[xi,yi] = 1
# Ellipse 2
mask2 = np.zeros((xlen,ylen))
A = ( (xv-xc2)*np.cos(ang2_rad) + (yv-yc2)*np.sin(ang2_rad) )**2 / a2**2
B = ( (xv-xc2)*np.sin(ang2_rad) - (yv-yc2)*np.cos(ang2_rad) )**2 / b2**2
xi,yi = np.where( A+B < 1.0 )
mask2[xi,yi] = 1
# Combine Ellipse 1 and 2 --> annulus
mask3 = np.ones((xlen,ylen)).astype(int)
tmp = mask1+mask2
xi,yi = np.where(tmp == 1.0)
mask3[xi,yi] = 0
return mask3.astype(bool)
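# Hedged usage sketch (added for illustration; not in the original script):
# build a centred circular annulus -- outer radius 80 px, inner radius 50 px --
# on a 200x200 grid and count the pixels it grabs. Remember the returned mask
# is True *outside* the annulus ring, so the ring itself is ~mask.
def _demo_annulus_mask():
    xlen = ylen = 200
    mask = make_annulus_mask(xlen, ylen,
                             100., 100., 80., 80., 0.,   # outer ellipse
                             100., 100., 50., 50., 0.)   # inner ellipse
    ring_pixels = np.sum(~mask)
    expected = np.pi * (80.**2 - 50.**2)   # ~12252 for a perfect annulus
    print('ring pixels: %d (analytic ~%d)' % (ring_pixels, expected))
    return mask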
def calculate_sky_annuli(fitsimage,image,total_mask,annulusparams,n_iterations):
'''Save sky count averages in n_iteration annuli, also plot up where random n_iterations of annuli were placed on fits image.'''
# Calculate sky in input annulus
xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2 = read_annulusparams(annulusparams)
h = fits.getheader(fitsimage)
xlen = int(h['NAXIS2'])
ylen = int(h['NAXIS1'])
mask = make_annulus_mask(xlen,ylen,xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2)
initial_annuli_mask_data = mask.copy()
image_annuli = copy.copy(image)
image_annuli[mask] = float('nan')
image_annuli[total_mask] = float('nan')
initial_annuli_name = 'annuli_input.fits'
writeFITS(image_annuli,initial_annuli_name)
print(' ')
print('***OUTPUT: Sky calculation annulus saved here: ',initial_annuli_name)
print(' ')
print('Average in input sky annulus is: ',np.nanmean(image_annuli))
print('Median in input sky annulus is: ',np.nanmedian(image_annuli))
print('Std in input sky annulus is: ',np.nanstd(image_annuli))
print('Number of finite non masked pixels in input sky annulus: ',np.sum(np.isfinite(image_annuli)))
# Plonk some random annuli, calculate average of averages and std of averages
# Vary xc,yc within width of annuli randomly (move xc2,yc2 by same amount)
# AND vary a1 randomly while keeping a1-a2 constant, variations up to width of annuli
annuli_thickness = abs(a1-a2)/2.
# Start figure to plot up annuli locations
fig = plt.figure(figsize=(48, 36))
f1 = aplpy.FITSFigure(fitsimage,figure=fig)
#f1.set_tick_labels_font(size='xx-small')
f1.ticks.hide()
f1.tick_labels.hide_x()
f1.tick_labels.hide_y()
f1.axis_labels.hide()
f1.show_grayscale(invert=True, stretch='linear', vmin=4.25, vmax=4.32)
# for g-band ngc 2841: vmin=2.38, vmax=2.42
sky_counts = []
pix_counts = []
n_counter = 0
n_notfinite = 0
xtomesh = np.arange(0, ylen, 1)
ytomesh = np.arange(0, xlen, 1)
X, Y = np.meshgrid(xtomesh, ytomesh)
while n_counter < n_iterations:
# Choose X random values for xc,yc and a1
xc_shift = np.random.randint(low=-annuli_thickness,high=annuli_thickness)
yc_shift = np.random.randint(low=-annuli_thickness,high=annuli_thickness)
a1_shift = np.random.randint(low=-annuli_thickness,high=annuli_thickness)
new_xc1 = xc1+xc_shift
new_xc2 = xc2+xc_shift
new_yc1 = yc1+yc_shift
new_yc2 = yc2+yc_shift
new_a1 = a1+a1_shift
new_a2 = a2+a1_shift
new_b1 = (b1/a1)*(new_a1)
new_b2 = (b2/a2)*(new_a2)
# Make mask for new annuli
mask = make_annulus_mask(xlen,ylen,
new_xc1,new_yc1,new_a1,new_b1,ang1,
new_xc2,new_yc2,new_a2,new_b2,ang2)
image_annuli = copy.copy(image)
image_annuli[mask] = float('nan')
image_annuli[total_mask] = float('nan')
# Plot up location annulus for display using show_contour
CS = plt.contour(X, Y, mask,linewidths=1.0,alpha=0.1,colors='red')
# Calculate average and number of pixels in average to array
#counts = 3.*np.nanmedian(image_annuli) - 2.*np.nanmean(image_annuli)
counts = np.nanmean(image_annuli)
# Add average to sky_counts if finite
# Also increment n_counter
# Else increment n_notfinite
if np.isfinite(counts):
sky_counts.append(counts)
pix_counts.append(np.sum(np.isfinite(image_annuli)))
n_counter += 1
else:
n_notfinite += 1
# Plot initial sky ellipse
# Copy wcs to total_mask_name, and show initial ellipse contour
CS = plt.contour(X, Y, initial_annuli_mask_data,linewidths=6.0,colors='green')
# Save figure of annuli locations
outname = './skyregionlocs.png'
f1.save(outname)
print(' ')
print('***OUTPUT: Annuli location plot saved here: ',outname)
if verbose:
print('Number of annuli placed randomly is: ',n_counter)
return sky_counts, pix_counts, n_notfinite, h
def copy_wcs(fits_withwcs,fits_withoutwcs):
h = fits.getheader(fits_withwcs)
f = fits.open(fits_withoutwcs)
newf = fits.PrimaryHDU()
newf.header = f[0].header
newf.data = f[0].data
newf.header['CTYPE1'] = h['CTYPE1']
newf.header['CRPIX1'] = h['CRPIX1']
newf.header['CRVAL1'] = h['CRVAL1']
newf.header['CTYPE2'] = h['CTYPE2']
newf.header['CRPIX2'] = h['CRPIX2']
newf.header['CRVAL2'] = h['CRVAL2']
newf.header['CD1_1'] = h['CD1_1']
newf.header['CD1_2'] = h['CD1_2']
newf.header['CD2_1'] = h['CD2_1']
newf.header['CD2_2'] = h['CD2_2']
#newf.header['RADECSYS'] = h['RADECSYS']
newf.header['EQUINOX'] = h['EQUINOX']
saveloc = fits_withoutwcs.split('.')[0]+'_wcs.fits'
newf.writeto(saveloc, overwrite=True)
return saveloc
def calculate_sky_annuli_alloverim(fitsimage,image,total_mask,annulusparams,n_iterations):
'''Save sky count averages in n_iteration annuli, also plot up where random n_iterations of annuli were placed on fits image.'''
# Calculate sky in input annulus
xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2 = read_annulusparams(annulusparams)
h = fits.getheader(fitsimage)
xlen = int(h['NAXIS2'])
ylen = int(h['NAXIS1'])
mask = make_annulus_mask(xlen,ylen,xc1,yc1,a1,b1,ang1,xc2,yc2,a2,b2,ang2)
initial_annuli_mask_data = mask.copy()
image_annuli = copy.copy(image)
image_annuli[mask] = float('nan')
image_annuli[total_mask] = float('nan')
initial_annuli_name = 'annuli_input.fits'
writeFITS(image_annuli,initial_annuli_name)
print(' ')
print('***OUTPUT: Sky calculation annulus saved here: ',initial_annuli_name)
print(' ')
print('Average in input sky annulus is: ',np.nanmean(image_annuli))
print('Median in input sky annulus is: ',np.nanmedian(image_annuli))
print('Std in input sky annulus is: ',np.nanstd(image_annuli))
print('Number of finite non masked pixels in input sky annulus: ',np.sum(np.isfinite(image_annuli)))
# Plonk some random annuli, calculate average of averages and std of averages
# Vary xc,yc within width of annuli randomly (move xc2,yc2 by same amount)
# AND vary a1 randomly while keeping a1-a2 constant, variations up to width of annuli
annuli_thickness = abs(a1-a2)/2.
# Start figure to plot up annuli locations
fig = plt.figure(figsize=(48, 36))
f1 = aplpy.FITSFigure(fitsimage,figure=fig)
#f1.set_tick_labels_font(size='xx-small')
f1.ticks.hide()
f1.tick_labels.hide_x()
f1.tick_labels.hide_y()
f1.axis_labels.hide()
f1.show_grayscale(invert=True, stretch='linear', vmin=4.25, vmax=4.32)
# g-band ngc 2841 vmin=2.38, vmax=2.42
sky_counts = []
pix_counts = []
n_counter = 0
n_notfinite = 0
xtomesh = np.arange(0, ylen, 1)
#!/usr/bin/env python
import os, sys
import fnmatch
import numpy as np
import scipy.io.wavfile
import scipy.signal
import pyaudio
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import row, column, widgetbox, gridplot
from bokeh.models import ColumnDataSource, Span, BoxAnnotation, Range1d
from bokeh.models.tools import \
CrosshairTool, BoxZoomTool, BoxSelectTool, HoverTool, \
PanTool, ResetTool, SaveTool, TapTool, WheelZoomTool
from bokeh.models.widgets import Div, Slider, TextInput, PreText, Select, Button
from bokeh.plotting import figure, output_file, output_notebook, show
from bokeh.document import without_document_lock
from tornado import gen
def play_all():
audata = orig_au.astype(np.int16).tobytes()
stream.write(audata)
def play_sel():
sel = gp.select_one(dict(tags=['cursel']))
samp1 = int(sel.left * orig_rate)
samp2 = int(sel.right * orig_rate)
audata = orig_au[samp1:samp2].astype(np.int16).tobytes()
stream.write(audata)
def get_filenames():
'''Walk datadir and get all .wav filenames.'''
files = []
for root, dirnames, fnames in os.walk(datadir):
for fname in fnmatch.filter(fnames, '*.wav'):
files.append(os.path.join(root, fname))
return files
def load_file(attrname, old, wav):
global au, orig_au, rate, orig_rate, timepts, df, stream
global xrng, yrng
(orig_rate, au) = scipy.io.wavfile.read(wav)
orig_au = au.copy()
if stream is None:
# Set up the audio stream for playback.
pya = pyaudio.PyAudio()
stream = pya.open(
format = pyaudio.paInt16,
channels = 1,
rate = int(orig_rate),
output = True)
decim_factor = 16
au = scipy.signal.decimate(au, decim_factor)
rate = orig_rate / decim_factor
timepts = np.arange(0, len(au)) / rate
source.data['x'] = timepts
source.data['au'] = au
x_range.update(end=timepts[-1])
# Now load the tongue data
tngfile = os.path.splitext(wav)[0] + '.txy'
palfile = os.path.join(os.path.dirname(wav), 'PAL.DAT')
phafile = os.path.join(os.path.dirname(wav), 'PHA.DAT')
df = pd.read_csv(
tngfile,
sep='\t',
names=[
'sec', 'ULx', 'ULy', 'LLx', 'LLy', 'T1x', 'T1y', 'T2x', 'T2y',
'T3x', 'T3y', 'T4x', 'T4y', 'MIx', 'MIy', 'MMx', 'MMy'
]
)
# Convert to seconds
df['sec'] = df['sec'] / 1e6
df = df.set_index(['sec'])
# Convert to mm
df[[
'ULx', 'ULy', 'LLx', 'LLy', 'T1x', 'T1y', 'T2x', 'T2y',
'T3x', 'T3y', 'T4x', 'T4y', 'MIx', 'MIy', 'MMx', 'MMy'
]] = df[[
'ULx', 'ULy', 'LLx', 'LLy', 'T1x', 'T1y', 'T2x', 'T2y',
'T3x', 'T3y', 'T4x', 'T4y', 'MIx', 'MIy', 'MMx', 'MMy'
]] * 1e-3
# Find global x/y max/min in this recording to set axis limits.
# Exclude bad values (1000000 in data file; 1000 mm in scaled dataframe).
cmpdf = df[df < badval]
xmax = np.max(
np.max(
cmpdf[['ULx','LLx','T1x', 'T2x', 'T3x', 'T4x', 'MIx', 'MMx']]
)
)
xmin = np.min(
np.min(
cmpdf[['ULx','LLx','T1x', 'T2x', 'T3x', 'T4x', 'MIx', 'MMx']]
)
)
ymax = np.max(
np.max(
cmpdf[['ULy','LLy','T1y', 'T2y', 'T3y', 'T4y', 'MIy', 'MMy']]
)
)
ymin = np.min(
np.min(
cmpdf[['ULy','LLy','T1y', 'T2y', 'T3y', 'T4y', 'MIy', 'MMy']]
)
)
# TODO: this works but produces SettingWithCopyWarning
# will not have to use this when bokeh can handle NaN in plots
# xdf = df[[
# 'ULx', 'LLx', 'T1x', 'T2x',
# 'T3x', 'T4x', 'MIx', 'MMx'
# ]]
# xmax = np.max(np.max(xdf[xdf < badval]))
# xdf[xdf == badval] = xrng[1]
# ydf = df[[
# 'ULy', 'LLy', 'T1y', 'T2y',
# 'T3y', 'T4y', 'MIy', 'MMy'
# ]]
# ymax = np.max(np.max(ydf[ydf < badval]))
# ydf[ydf == badval] = yrng[1]
# df = pd.concat([xdf, ydf], axis=1)
paldf = pd.read_csv(palfile, sep=r'\s+', header=None, names=['x', 'y'])
paldf = paldf * 1e-3
palsource.data = {'x': paldf['x'], 'y': paldf['y']}
phadf = pd.read_csv(phafile, sep=r'\s+', header=None, names=['x', 'y'])
phadf = phadf * 1e-3
phasource.data = {'x': phadf['x'], 'y': phadf['y']}
xmin = np.min([xmin, np.min(paldf['x']), np.min(phadf['x'])])
xmax = np.max([xmax, np.max(paldf['x']), np.max(phadf['x'])])
ymin = np.min([ymin, np.min(paldf['y']), np.min(phadf['y'])])
import numpy as np
import h5py as h
import matplotlib as mpl
mpl.use('Qt5Agg')
"""
USEFUL METHODS.
"""
def toPoint(point):
    # Inverse of toIndex below: undo the 250-pixel offset first, then flip the
    # y axis. (Flipping before subtracting, as originally written, does not
    # invert toIndex.)
    point = np.array(point) - 250
    point[1] *= -1
    return point
def toIndex(point):
point = np.array(point)
point[1] *= -1
return point+250
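# Hedged sketch (added; not in the original): with the corrected toPoint above,
# the two helpers are inverses, mapping between the 500x500 pixel grid (origin
# top-left, y increasing downwards) and physical coordinates centred at pixel
# (250, 250) with y increasing upwards.
assert np.allclose(toPoint(toIndex([12.0, -34.0])), [12.0, -34.0])
assert np.allclose(toIndex([0.0, 0.0]), [250.0, 250.0])  # grid centre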
"""
START OF MASK STUFF.
"""
import imageio as io
arr = io.read('../scratch/testMask.png').get_data(0)
arr = np.int64(np.all(arr[:, :, :3] == 0, axis=2))
from matplotlib import pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Circle,Rectangle
# N pixels per mm.
pixelSize = 10
# Beam height in mm converted to pixels.
beamHeight = 0.5
# Look at beam position (center position in mm).
_lookAt = 10
_beamB = _lookAt - beamHeight/2
_beamT = _lookAt + beamHeight/2
# Initialise mask start and stop positions.
# Start and stop are index positions of the array.
start = [0,0]
stop = [0,0]
# Find mask horizontal start and stop positions.
for row in range(arr.shape[0]):
# Find the first row with a 0 in it.
if np.sum(arr[row,:]) != arr.shape[1]:
# Find the middle position of all the values that are 0.
middle = np.argwhere(arr[row,:] == 0).mean()
# Store the start position.
start = [row,middle]
break
for row in reversed(range(arr.shape[0])):
if np.sum(arr[row,:]) != arr.shape[1]:
middle = np.argwhere(arr[row,:] == 0).mean()
stop = [row,middle]
break
# Set diameter for the mask (in mm).
_radius = 25
# Create datapoints for two half circles in degrees.
leftCircleAngle = np.linspace(90, 270, 2000)
rightCircleAngle = np.linspace(90, -90, 2000)
# Find the tangent values of the points in each half circle.
leftCircleTangent = np.tan(np.deg2rad(leftCircleAngle))
rightCircleTangent = np.tan(np.deg2rad(rightCircleAngle))
# Get subArray of mask.
# subArray = arr[b:t,:]
# Investigate beam area:
_bt = int( np.absolute(25-_beamT)*10 )
_bb = int( np.absolute(25-_beamB)*10 )
subArray = arr[_bt:_bb,:]
# Get the top and bottom line of the sub array.
line1 = subArray[0,:]
line2 = subArray[-1,:]
# Find the left and right most points for each line.
line1 = np.argwhere(line1 == 0)
line2 = np.argwhere(line2 == 0)
tl = line1.min()
tr = line1.max()
bl = line2.min()
br = line2.max()
# Calculate the tangent for each side.
left = np.arctan(((tl-bl)/pixelSize)/beamHeight)
right = np.arctan(((tr-br)/pixelSize)/beamHeight)
# Find the tangent condition that matches in the circle.
leftAngle = np.deg2rad(leftCircleAngle[ np.argmin(np.absolute(leftCircleTangent-left)) ])
rightAngle = np.deg2rad(rightCircleAngle[ np.argmin(np.absolute(rightCircleTangent-right)) ])
# Find the position of the mask that matches the tangent condition.
circleLeftPosition = np.array([_radius*np.cos(leftAngle),-_radius*np.sin(leftAngle)])
circleRightPosition = np.array([_radius*np.cos(rightAngle),-_radius*np.sin(rightAngle)])
# Get the position of the matched pixel.
x1 = (0 + np.min(np.array([tl,bl])) + np.absolute(tl-bl)/2)/pixelSize
y1 = (_bt + subArray.shape[0]/2)/pixelSize
pos1 = np.array([-25+x1,25-y1])
move1 = circleLeftPosition - pos1
# Right circle.
x2 = (0 + np.min(np.array([tr,br])) + np.absolute(tr-br)/2)/pixelSize
y2 = (_bt + subArray.shape[0]/2)/pixelSize
pos2 = np.array([-25+x2,25-y2])
from time import time
import cv2
import numpy as np
from scene import Scene
from light import Light
from camera import Camera
from game_object import GameObject
def triangle_area(v0, v1, v2):
"""
| v01[0] v01[1] |
| v02[0] v02[1] | = v01[0]*v02[1] - v01[1]*v02[0]
"""
return (v1[0]-v0[0])*(v2[1]-v0[1]) - (v1[1]-v0[1])*(v2[0]-v0[0])
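# Illustrative check (added; not in the original): the unit right triangle has
# area 0.5, so triangle_area returns +1 for counter-clockwise winding and -1
# for clockwise winding.
assert triangle_area((0, 0), (1, 0), (0, 1)) == 1
assert triangle_area((0, 0), (0, 1), (1, 0)) == -1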
def convert_map_for_vis(m, ignore=np.inf):
m = np.array(m)
m[m == ignore] = 1
m_min = np.min(m[m != 1])
m_max = np.max(m[m != 1])
m[m != 1] = (m[m != 1] - m_min) / (m_max - m_min) * 0.8
return m
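# Minimal sketch (added for illustration): finite map values are rescaled to
# [0, 0.8] while the ignored value (np.inf by default) maps to 1, so empty
# background pixels render brightest. (Values exactly equal to 1 would collide
# with the internal sentinel, so they are avoided here.)
_m = convert_map_for_vis(np.array([[2.0, 4.0], [6.0, np.inf]]))
assert np.allclose(_m, [[0.0, 0.4], [0.8, 1.0]])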
def geometric_transform(scene):
world_to_camera = scene.camera.world_to_camera
world_to_light = scene.light.world_to_light
near_clip = scene.camera.near_clip
far_clip = scene.camera.far_clip
# for light.shadow_map_param
sxmin, sxmax = np.inf, -np.inf
symin, symax = np.inf, -np.inf
for obj in scene.objects:
for name, mesh in obj.mesh.mesh.items():
mesh_format = mesh['format'] # V: vertex, N: normal, T: texture
if mesh_format == 'V3F':
step = 3
elif mesh_format == 'N3F_V3F':
step = 6
elif mesh_format == 'T2F_V3F':
step = 5
elif mesh_format == 'T2F_N3F_V3F':
step = 8
else:
assert False, 'invalid mesh_format'
vertices = mesh['vertices']
for i in range(0, len(vertices), step*3):
# triangle vertex coordinates and scaling
v0 = np.array([[*vertices[i+1*step-3:i+1*step], 1]]).T * obj.scale
v1 = np.array([[*vertices[i+2*step-3:i+2*step], 1]]).T * obj.scale
v2 = np.array([[*vertices[i+3*step-3:i+3*step], 1]]).T * obj.scale
v0[3, 0] = 1
v1[3, 0] = 1
v2[3, 0] = 1
if False and 'N3F' in mesh_format:
# triangle vertex normal vectors
n0 = np.array([vertices[i+1*step-6:i+1*step-3]]).T
n1 = np.array([vertices[i+2*step-6:i+2*step-3]]).T
n2 = np.array([vertices[i+3*step-6:i+3*step-3]]).T
else:
# if the model does not provide normal vectors, generate normal from triangle vertices
n = np.cross((v1-v0)[:3, 0], (v2-v0)[:3, 0])
# Copyright 2020 <NAME>, MIT license
import pickle
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colorbar import ColorbarBase
from matplotlib.dates import DateFormatter
import numpy as np
from statsmodels.robust.robust_linear_model import RLM
import statsmodels.api as sm
from plot_maps import get_bounds, get_cmap, get_colors
from traveltime import repack, filters, LATLON0
from util.imaging import convert_coords2km
def fit_velocity(dist, tmax1, plot=True):
# rlm = RLM(dist/1000 , np.abs(tmax1))
# rlm = RLM(np.abs(tmax1), dist/1000, M=sm.robust.norms.Hampel(1, 2, 4))
s = dist / 1000
t = np.abs(tmax1)
o = np.ones(len(t))
# rlm = RLM(d/t, np.ones(len(d)), M=sm.robust.norms.Hampel(1, 2, 4))
rlm = RLM(s/t, o, M=sm.robust.norms.Hampel(1, 2, 4))
res = rlm.fit(maxiter=100)
v = res.params[0]
w = res.weights
# scale = res.scale
# v = np.median(s/t)
from statsmodels.robust.scale import mad
scale = mad(s/t, center=v, c=1)
tmax = np.max(s) / v
if plot:
fig = plt.figure()
ax = fig.add_subplot(121)
ax.scatter(tmax1, dist)
ax.plot((-tmax, 0, tmax), (np.max(dist), 0, np.max(dist)))
ax2 = fig.add_subplot(122)
ax2.hist(dist/1000/np.abs(tmax1), bins=np.linspace(3, 5, 21))
return v, w, scale
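def _demo_fit_velocity():
    # Hedged synthetic check (added for illustration; not in the original):
    # draw station distances and lag times for a 4 km/s medium with mild noise,
    # then recover the velocity robustly with fit_velocity.
    rng = np.random.default_rng(42)
    dist = rng.uniform(5e3, 50e3, 200)                      # metres
    tmax1 = dist / 1000. / 4.0 + rng.normal(0., 0.05, 200)  # seconds
    v, weights, scale = fit_velocity(dist, tmax1, plot=False)
    print('recovered velocity: %.2f km/s (robust scale %.3f)' % (v, scale))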
def stats_vs_time(dist, tmax1, tmax2, evids, times1, times2, bounds):
ress = []
r = []
for t1, t2 in zip(bounds[:-1], bounds[1:]):
print(t1, t2)
cond = np.logical_and.reduce((times1 <= t2, times1 > t1, times2 <= t2, times2 > t1, tmax1 != 0))
dist_, tmax1_, tmax2_, t1_ = filters(cond, dist, tmax1, tmax2, times1)
ind = tmax2_!=0
tmax_=tmax1_
v, weights, scale = fit_velocity(dist_, tmax_, plot=False)
r.append((t1, v, weights, scale, dist_, tmax_))
ress.append('{!s:10} {:.3f} {} {:.3f}'.format(t1, np.round(v, 3), len(dist_), np.round(scale, 3)))
fig = plt.figure(figsize=(10, 8.5))
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332, sharex=ax1, sharey=ax1)
ax3 = fig.add_subplot(333, sharex=ax1, sharey=ax1)
ax4 = fig.add_subplot(334, sharex=ax1, sharey=ax1)
ax5 = fig.add_subplot(335, sharex=ax1, sharey=ax1)
ax6 = fig.add_subplot(336)
axes = [ax1, ax2, ax3, ax4, ax5]
colors = get_colors()
for i, (t1, v, weights, scale, dist, tmax1) in enumerate(r):
tmax = np.max(dist) / 1000 / v
c = colors[i]
ax = axes[i]
cmap = LinearSegmentedColormap.from_list('w_%d' % i, ['white', c])
ax.scatter(tmax1, dist, c=weights, cmap=cmap, edgecolors='k')
ax.plot((-tmax, 0, tmax), (np.max(dist), 0, np.max(dist)), color=c)
ax.annotate('N=%d' % len(dist), (0.03, 0.03), xycoords='axes fraction')
ax.annotate(r'$v_{{\rm{{S}}}}{{=}}({:.2f}{{\pm}}{:.2f})$km/s'.format(v, scale), (0.97, 0.03), xycoords='axes fraction', ha='right')
bins=np.linspace(2, 6, 41)
centers = 0.5 * (bins[:-1] + bins[1:])
heights = np.diff(bins)
for i, (t1, v, weights, scale, dist, tmax1) in enumerate(r):
c = colors[i]
vmedian = np.median(dist/1000/np.abs(tmax1))
#!/usr/bin/env python3
"""
Context objects for managing datasets.
"""
import numpy as np
from . import ic # noqa: F401
from . import _make_logger
# Set up ArrayContext logger
logger = _make_logger('ArrayContext', 'error') # or 'info'
def _exe_axis_moves(array, moves, reverse=False):
"""
Execute the input series of axis swaps.
"""
slice_ = slice(None, None, -1) if reverse else slice(None)
for move in moves[slice_]:
move = move[slice_]
array = np.moveaxis(array, *move)
logger.info(f'Move {move[0]} to {move[1]}: {array.shape}')
return array
def _get_axis_moves(push_left, push_right, left_base=0, right_base=-1):
"""
Get the series of axis swaps given the input dimensionality.
"""
logger.info(f'Push axes left: {push_left}')
logger.info(f'Push axes right: {push_right}')
moves = []
for i, axis in enumerate(push_right):
moves.append((axis, right_base))
for push in (push_left, push_right):
push[push > axis] -= 1 # NOTE: some of these changes have no effect
for axis in push_left:
moves.append((axis, left_base))
for push in (push_left, push_right):
push[push < axis] += 1 # NOTE: some of these changes have no effect
return np.array(moves)
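def _demo_axis_moves():
    # Hedged sketch (added for illustration; not part of the module API): push
    # axis 1 of a 4D array to the front and axis 2 to the back, then undo the
    # moves. _get_axis_moves mutates its arguments, so pass fresh arrays.
    arr = np.empty((2, 3, 4, 5))
    moves = _get_axis_moves(np.array([1]), np.array([2]))
    out = _exe_axis_moves(arr, moves)
    assert out.shape == (3, 2, 5, 4)
    assert _exe_axis_moves(out, moves, reverse=True).shape == arr.shape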
class _ArrayContext(object):
"""
Temporarily reshape the input dataset(s). This is needed so we can do objective
analysis tasks "along an axis". Some tasks can be done by just moving axes and using
array[..., :] notation but this is not always possible. Should work with arbitrary
duck-type arrays, including dask arrays.
"""
def __init__(
self, *args,
push_right=None, push_left=None, nflat_right=None, nflat_left=None,
):
"""
Parameters
----------
*datas : numpy.ndarray
The arrays to be reshaped
push_left, push_right : int or list of int, optional
Axis or axes to move to the left or right sides. Axes are moved in the input
order. By default, if neither are provided, `push_right` is set to ``-1``.
nflat_left, nflat_right : int, optional
Number of dimensions to flatten on the left or right sides. By default, if
only `push_left` is provided, `nflat_right` is set to ``data.ndim -
len(push_left)``, and if only `push_right` is provided, `nflat_left` is set
to ``data.ndim - len(push_right)``.
Examples
--------
Here is a worked example used with the EOF algorithm:
>>> import logging
>>> import numpy as np
>>> import xarray as xr
>>> from climopy.internals.context import logger, _ArrayContext
>>> logger.setLevel(logging.INFO)
>>> # Generate neof, member, run, time, plev, lat array
>>> dataarray = xr.DataArray(
... np.random.rand(12, 8, 100, 40, 20),
... dims=('member', 'run', 'time', 'plev', 'lat'),
... )
>>> array = dataarray.data
>>> with _ArrayContext(
... array,
... push_left=(0, 1), nflat_left=2,
... push_right=(2, 3, 4), nflat_right=2,
... ) as context:
... data = context.data
... nextra, ntime, nspace = data.shape
... eofs = np.random.rand(nextra, 5, 1, nspace) # singleton time dimension
... pcs = np.random.rand(nextra, 5, ntime, 1) # singleton space dimension
... context.replace_data(eofs, pcs, insert_left=1)
>>> logger.setLevel(logging.ERROR)
>>> eofs, pcs = context.data
"""
# Set arrays
# NOTE: No array standardization here. Assume duck-type arrays (numpy
# arrays, pint quantities, xarray DataArrays, dask arrays).
if not args:
raise ValueError('Need at least one input argument.')
self._arrays = args
self._shapes = []
self._moves = []
ndim = self._arrays[0].ndim
# Parse axis arguments and ensure they are positive
if push_right is None and push_left is None:
push_right = -1
if push_right is None:
push_right = np.array([])
else:
push_right = np.atleast_1d(push_right)
if push_left is None:
push_left = np.array([])
else:
push_left = np.atleast_1d(push_left)
for push, side in zip((push_left, push_right), ('left', 'right')):
push[push < 0] += ndim
if any(push < 0) or any(push >= ndim) or np.unique(push).size != push.size:
raise ValueError(f'Invalid push_{side}={push} for {ndim}D array.')
self._push_left = push_left
self._push_right = push_right
# Parse nflat arguments. When user requests pushing to right, means we want
# to flatten the remaining left dims. Same goes for pushing to left.
# NOTE: There is distinction here between 'None' and '0'. The latter means
# add a singleton dimension (useful when iterating over 'extra' dimensions)
# while the former means add nothing.
if nflat_left is None and not push_left.size and push_right.size:
nflat_left = ndim - push_right.size
if nflat_right is None and not push_right.size and push_left.size:
nflat_right = ndim - push_left.size
self._nflat_left = nflat_left
self._nflat_right = nflat_right
def replace_data(self, *args, insert_left=None, insert_right=None):
"""
Replace the data attribute with new array(s).
Parameters
----------
*args : array-like, optional
The new arrays. The unflattened middle-dimensions can be changed. The
flattened leading or trailing dimensions can be reduced to singleton, but
otherwise must be identical or it is unclear how they should be re-expanded.
insert_left, insert_right : int, optional
Number of new dimensions added to the left or right of the array.
Dimensions can only be added to the left or the right of the
unflattened middle-dimensions of the array. For example, `climopy.eof`
adds a new `neof` dimension so that dimensions are transformed
from ``(nextra, ntime, nspace)`` to ``(nextra, neof, ntime, nspace)``.
Use lists of numbers to transform input arguments differently.
Examples
--------
Inserting new dimensions does not mess up the order of values in dimensions
that come before or after. This is revealed by playing with a simple example.
>>> a = np.array(
... [
... [[1, 2, 1], [3, 4, 3]],
... [[5, 6, 5], [7, 8, 7]],
... [[9, 10, 9], [11, 12, 11]],
... ]
... )
>>> a.shape
(3, 2, 3)
>>> a[:, 0, 0]
array([1, 5, 9])
>>> np.reshape(a, (3, 6), order='F')[:, 0]
array([1, 5, 9])
>>> np.reshape(a, (3, 6), order='C')[:, 0]
array([1, 5, 9])
"""
# Parse arguments
inserts_left, inserts_right = [], []
for inserts, insert in zip((inserts_left, inserts_right), (insert_left, insert_right)): # noqa: E501
insert = np.atleast_1d(insert)
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import bisect
import copy
import gc
import logging
import multiprocessing as mp
import os
import platform
import warnings
import numpy as np
from matplotlib.pyplot import figure, plot, show
from nilearn import masking
from scipy import ndimage
from sklearn.decomposition import PCA
import rapidtide.calccoherence as tide_calccoherence
import rapidtide.calcnullsimfunc as tide_nullsimfunc
import rapidtide.calcsimfunc as tide_calcsimfunc
import rapidtide.correlate as tide_corr
import rapidtide.filter as tide_filt
import rapidtide.fit as tide_fit
import rapidtide.glmpass as tide_glmpass
import rapidtide.helper_classes as tide_classes
import rapidtide.io as tide_io
import rapidtide.miscmath as tide_math
import rapidtide.multiproc as tide_multiproc
import rapidtide.peakeval as tide_peakeval
import rapidtide.refine as tide_refine
import rapidtide.resample as tide_resample
import rapidtide.simfuncfit as tide_simfuncfit
import rapidtide.stats as tide_stats
import rapidtide.util as tide_util
import rapidtide.wiener as tide_wiener
from rapidtide.tests.utils import mse
from .utils import setup_logger
try:
import mkl
mklexists = True
except ImportError:
mklexists = False
print("mklexists:", mklexists)
try:
from memory_profiler import profile
memprofilerexists = True
except ImportError:
memprofilerexists = False
LGR = logging.getLogger("GENERAL")
TimingLGR = logging.getLogger("TIMING")
def conditionalprofile():
def resdec(f):
if memprofilerexists:
return profile(f)
return f
return resdec
@conditionalprofile()
def memcheckpoint(message):
print(message)
def maketmask(filename, timeaxis, maskvector, debug=False):
inputdata = tide_io.readvecs(filename)
theshape = np.shape(inputdata)
if theshape[0] == 1:
# this is simply a vector, one per TR. If the value is nonzero, include the point, otherwise don't
if theshape[1] == len(timeaxis):
maskvector = np.where(inputdata[0, :] > 0.0, 1.0, 0.0)
else:
raise ValueError("tmask length does not match fmri data")
else:
maskvector *= 0.0
for idx in range(0, theshape[1]):
starttime = inputdata[0, idx]
endtime = starttime + inputdata[1, idx]
startindex = np.max((bisect.bisect_left(timeaxis, starttime), 0))
endindex = np.min((bisect.bisect_right(timeaxis, endtime), len(maskvector) - 1))
maskvector[startindex:endindex] = 1.0
LGR.info(f"{starttime}, {startindex}, {endtime}, {endindex}")
return maskvector
def numpy2shared(inarray, thetype):
thesize = inarray.size
theshape = inarray.shape
if thetype == np.float64:
inarray_shared = mp.RawArray("d", inarray.reshape(thesize))
else:
inarray_shared = mp.RawArray("f", inarray.reshape(thesize))
inarray = np.frombuffer(inarray_shared, dtype=thetype, count=thesize)
inarray.shape = theshape
return inarray
def allocshared(theshape, thetype):
thesize = int(1)
if not isinstance(theshape, (list, tuple)):
thesize = theshape
else:
for element in theshape:
thesize *= int(element)
if thetype == np.float64:
outarray_shared = mp.RawArray("d", thesize)
else:
outarray_shared = mp.RawArray("f", thesize)
outarray = np.frombuffer(outarray_shared, dtype=thetype, count=thesize)
outarray.shape = theshape
return outarray, outarray_shared, theshape
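def _demo_allocshared():
    # Hedged usage sketch (added; not part of rapidtide): allocate a float64
    # array backed by a multiprocessing.RawArray so worker processes can read
    # it without pickling, then fill it in place through the numpy view.
    outarray, outarray_shared, theshape = allocshared((4, 16), np.float64)
    outarray[:] = 1.0
    print('shared array shape:', theshape, 'sum:', outarray.sum())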
def readamask(maskfilename, nim_hdr, xsize, istext=False, valslist=None, maskname="the"):
LGR.verbose(f"readamask called with filename: {maskfilename} vals: {valslist}")
if istext:
maskarray = tide_io.readvecs(maskfilename).astype("int16")
theshape = np.shape(maskarray)
theincludexsize = theshape[0]
if not theincludexsize == xsize:
raise ValueError(
f"Dimensions of {maskname} mask do not match the input data - exiting"
)
else:
themask, maskarray, mask_hdr, maskdims, masksizes = tide_io.readfromnifti(maskfilename)
maskarray = np.round(maskarray, 0).astype("int16")
if not tide_io.checkspacematch(mask_hdr, nim_hdr):
raise ValueError(f"Dimensions of {maskname} mask do not match the fmri data - exiting")
if valslist is not None:
tempmask = (0 * maskarray).astype("int16")
for theval in valslist:
LGR.verbose(f"looking for voxels matching {theval}")
tempmask[np.where(np.fabs(maskarray - theval) < 0.1)] += 1
maskarray = np.where(tempmask > 0, 1, 0)
return maskarray
def getglobalsignal(indata, optiondict, includemask=None, excludemask=None, pcacomponents=0.8):
# Start with all voxels
themask = indata[:, 0] * 0 + 1
# modify the mask if needed
if includemask is not None:
themask = themask * includemask
if excludemask is not None:
themask = themask * (1 - excludemask)
# combine all the voxels using one of the three methods
global rt_floatset, rt_floattype
globalmean = rt_floatset(indata[0, :])
thesize = np.shape(themask)
numvoxelsused = int(np.sum(np.where(themask > 0.0, 1, 0)))
selectedvoxels = indata[np.where(themask > 0.0), :][0]
LGR.info(f"constructing global mean signal using {optiondict['globalsignalmethod']}")
if optiondict["globalsignalmethod"] == "sum":
globalmean = np.sum(selectedvoxels, axis=0)
elif optiondict["globalsignalmethod"] == "meanscale":
themean = np.mean(indata, axis=1)
for vox in range(0, thesize[0]):
if themask[vox] > 0.0:
if themean[vox] != 0.0:
globalmean += indata[vox, :] / themean[vox] - 1.0
elif optiondict["globalsignalmethod"] == "pca":
try:
thefit = PCA(n_components=pcacomponents).fit(selectedvoxels)
except ValueError:
if pcacomponents == "mle":
LGR.warning("mle estimation failed - falling back to pcacomponents=0.8")
thefit = PCA(n_components=0.8).fit(selectedvoxels)
else:
raise ValueError("unhandled math exception in PCA refinement - exiting")
varex = 100.0 * np.cumsum(thefit.explained_variance_ratio_)[len(thefit.components_) - 1]
LGR.info(
f"Using {len(thefit.components_)} component(s), accounting for "
f"{varex:.2f}% of the variance"
)
else:
dummy = optiondict["globalsignalmethod"]
raise ValueError(f"illegal globalsignalmethod: {dummy}")
LGR.info(f"used {numvoxelsused} voxels to calculate global mean signal")
return tide_math.stdnormalize(globalmean), themask
def addmemprofiling(thefunc, memprofile, themessage):
if memprofile:
return profile(thefunc, precision=2)
else:
tide_util.logmem(themessage)
return thefunc
def checkforzeromean(thedataset):
    themean = np.mean(thedataset, axis=1)
    thestd = np.std(thedataset, axis=1)
    return np.mean(thestd) > np.mean(themean)
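def _demo_checkforzeromean():
    # Hedged sketch (added for illustration): already-demeaned data trips the
    # detector, while data riding on a large positive baseline does not.
    rng = np.random.default_rng(0)
    assert checkforzeromean(rng.normal(0.0, 1.0, (10, 100)))
    assert not checkforzeromean(rng.normal(100.0, 1.0, (10, 100)))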
def echocancel(thetimecourse, echooffset, thetimestep, outputname, padtimepoints):
tide_io.writebidstsv(
f"{outputname}_desc-echocancellation_timeseries",
thetimecourse,
1.0 / thetimestep,
columns=["original"],
append=False,
)
shifttr = echooffset / thetimestep # lagtime is in seconds
echotc, dummy, dummy, dummy = tide_resample.timeshift(thetimecourse, shifttr, padtimepoints)
echotc[0 : int(np.ceil(shifttr))] = 0.0
echofit, echoR = tide_fit.mlregress(echotc, thetimecourse)
fitcoeff = echofit[0, 1]
outputtimecourse = thetimecourse - fitcoeff * echotc
tide_io.writebidstsv(
f"{outputname}_desc-echocancellation_timeseries",
echotc,
1.0 / thetimestep,
columns=["echo"],
append=True,
)
tide_io.writebidstsv(
f"{outputname}_desc-echocancellation_timeseries",
outputtimecourse,
1.0 / thetimestep,
columns=["filtered"],
append=True,
)
return outputtimecourse, echofit, echoR
def rapidtide_main(argparsingfunc):
optiondict, theprefilter = argparsingfunc
optiondict["nodename"] = platform.node()
fmrifilename = optiondict["in_file"]
outputname = optiondict["outputname"]
filename = optiondict["regressorfile"]
# Set up loggers for workflow
setup_logger(
logger_filename=f"{outputname}_log.txt",
timing_filename=f"{outputname}_runtimings.tsv",
memory_filename=f"{outputname}_memusage.tsv",
verbose=optiondict["verbose"],
debug=optiondict["debug"],
)
TimingLGR.info("Start")
# construct the BIDS base dictionary
outputpath = os.path.dirname(optiondict["outputname"])
rawsources = [os.path.relpath(optiondict["in_file"], start=outputpath)]
if optiondict["regressorfile"] is not None:
rawsources.append(os.path.relpath(optiondict["regressorfile"], start=outputpath))
bidsbasedict = {
"RawSources": rawsources,
"Units": "arbitrary",
"CommandLineArgs": optiondict["commandlineargs"],
}
TimingLGR.info("Argument parsing done")
# don't use shared memory if there is only one process
if (optiondict["nprocs"] == 1) and not optiondict["alwaysmultiproc"]:
optiondict["sharedmem"] = False
LGR.info("running single process - disabled shared memory use")
# disable numba now if we're going to do it (before any jits)
if optiondict["nonumba"]:
tide_util.disablenumba()
# set the internal precision
global rt_floatset, rt_floattype
if optiondict["internalprecision"] == "double":
LGR.info("setting internal precision to double")
rt_floattype = "float64"
rt_floatset = np.float64
else:
LGR.info("setting internal precision to single")
rt_floattype = "float32"
rt_floatset = np.float32
# set the output precision
if optiondict["outputprecision"] == "double":
LGR.info("setting output precision to double")
rt_outfloattype = "float64"
rt_outfloatset = np.float64
else:
LGR.info("setting output precision to single")
rt_outfloattype = "float32"
rt_outfloatset = np.float32
# set set the number of worker processes if multiprocessing
if optiondict["nprocs"] < 1:
optiondict["nprocs"] = tide_multiproc.maxcpus()
if optiondict["singleproc_getNullDist"]:
optiondict["nprocs_getNullDist"] = 1
else:
optiondict["nprocs_getNullDist"] = optiondict["nprocs"]
if optiondict["singleproc_calcsimilarity"]:
optiondict["nprocs_calcsimilarity"] = 1
else:
optiondict["nprocs_calcsimilarity"] = optiondict["nprocs"]
if optiondict["singleproc_peakeval"]:
optiondict["nprocs_peakeval"] = 1
else:
optiondict["nprocs_peakeval"] = optiondict["nprocs"]
if optiondict["singleproc_fitcorr"]:
optiondict["nprocs_fitcorr"] = 1
else:
optiondict["nprocs_fitcorr"] = optiondict["nprocs"]
if optiondict["singleproc_glm"]:
optiondict["nprocs_glm"] = 1
else:
optiondict["nprocs_glm"] = optiondict["nprocs"]
# set the number of MKL threads to use
if mklexists:
mklmaxthreads = mkl.get_max_threads()
if not (1 <= optiondict["mklthreads"] <= mklmaxthreads):
optiondict["mklthreads"] = mklmaxthreads
mkl.set_num_threads(optiondict["mklthreads"])
print(f"using {optiondict['mklthreads']} MKL threads")
# Generate MemoryLGR output file with column names
if not optiondict["memprofile"]:
tide_util.logmem()
# open the fmri datafile
tide_util.logmem("before reading in fmri data")
if tide_io.checkiftext(fmrifilename):
LGR.info("input file is text - all I/O will be to text files")
optiondict["textio"] = True
if optiondict["gausssigma"] > 0.0:
optiondict["gausssigma"] = 0.0
LGR.info("gaussian spatial filter disabled for text input files")
else:
optiondict["textio"] = False
if optiondict["textio"]:
nim_data = tide_io.readvecs(fmrifilename)
nim_hdr = None
theshape = np.shape(nim_data)
xsize = theshape[0]
ysize = 1
numslices = 1
fileiscifti = False
timepoints = theshape[1]
thesizes = [0, int(xsize), 1, 1, int(timepoints)]
numspatiallocs = int(xsize)
else:
fileiscifti = tide_io.checkifcifti(fmrifilename)
if fileiscifti:
LGR.info("input file is CIFTI")
(
cifti,
cifti_hdr,
nim_data,
nim_hdr,
thedims,
thesizes,
dummy,
) = tide_io.readfromcifti(fmrifilename)
optiondict["isgrayordinate"] = True
timepoints = nim_data.shape[1]
numspatiallocs = nim_data.shape[0]
LGR.info(f"cifti file has {timepoints} timepoints, {numspatiallocs} numspatiallocs")
slicesize = numspatiallocs
else:
LGR.info("input file is NIFTI")
nim, nim_data, nim_hdr, thedims, thesizes = tide_io.readfromnifti(fmrifilename)
optiondict["isgrayordinate"] = False
xsize, ysize, numslices, timepoints = tide_io.parseniftidims(thedims)
numspatiallocs = int(xsize) * int(ysize) * int(numslices)
xdim, ydim, slicethickness, tr = tide_io.parseniftisizes(thesizes)
tide_util.logmem("after reading in fmri data")
# correct some fields if necessary
if fileiscifti:
fmritr = 0.72 # this is wrong and is a hack until I can parse CIFTI XML
else:
if optiondict["textio"]:
if optiondict["realtr"] <= 0.0:
raise ValueError(
"for text file data input, you must use the -t option to set the timestep"
)
else:
if nim_hdr.get_xyzt_units()[1] == "msec":
fmritr = thesizes[4] / 1000.0
else:
fmritr = thesizes[4]
if optiondict["realtr"] > 0.0:
fmritr = optiondict["realtr"]
# check to see if we need to adjust the oversample factor
if optiondict["oversampfactor"] < 0:
optiondict["oversampfactor"] = int(np.max([np.ceil(fmritr / 0.5), 1]))
LGR.info(f"oversample factor set to {optiondict['oversampfactor']}")
oversamptr = fmritr / optiondict["oversampfactor"]
LGR.verbose(f"fmri data: {timepoints} timepoints, tr = {fmritr}, oversamptr = {oversamptr}")
LGR.info(f"{numspatiallocs} spatial locations, {timepoints} timepoints")
TimingLGR.info("Finish reading fmrifile")
# if the user has specified start and stop points, limit check, then use these numbers
validstart, validend = tide_util.startendcheck(
timepoints, optiondict["startpoint"], optiondict["endpoint"]
)
if abs(optiondict["lagmin"]) > (validend - validstart + 1) * fmritr / 2.0:
raise ValueError(
f"magnitude of lagmin exceeds {(validend - validstart + 1) * fmritr / 2.0} - invalid"
)
if abs(optiondict["lagmax"]) > (validend - validstart + 1) * fmritr / 2.0:
raise ValueError(
f"magnitude of lagmax exceeds {(validend - validstart + 1) * fmritr / 2.0} - invalid"
)
# do spatial filtering if requested
if optiondict["gausssigma"] < 0.0 and not optiondict["textio"]:
# set gausssigma automatically
optiondict["gausssigma"] = np.mean([xdim, ydim, slicethickness]) / 2.0
if optiondict["gausssigma"] > 0.0:
LGR.info(
f"applying gaussian spatial filter to timepoints {validstart} "
f"to {validend} with sigma={optiondict['gausssigma']}"
)
reportstep = 10
for i in range(validstart, validend + 1):
if (i % reportstep == 0 or i == validend) and optiondict["showprogressbar"]:
tide_util.progressbar(
i - validstart + 1,
validend - validstart + 1,
label="Percent complete",
)
nim_data[:, :, :, i] = tide_filt.ssmooth(
xdim,
ydim,
slicethickness,
optiondict["gausssigma"],
nim_data[:, :, :, i],
)
print()
TimingLGR.info("End 3D smoothing")
# reshape the data and trim to a time range, if specified. Check for special case of no trimming to save RAM
fmri_data = nim_data.reshape((numspatiallocs, timepoints))[:, validstart : validend + 1]
validtimepoints = validend - validstart + 1
# detect zero mean data
optiondict["dataiszeromean"] = checkforzeromean(fmri_data)
if optiondict["dataiszeromean"]:
LGR.warning(
"WARNING: dataset is zero mean - forcing variance masking and no refine prenormalization. "
"Consider specifying a global mean and correlation mask."
)
optiondict["refineprenorm"] = "None"
optiondict["globalmaskmethod"] = "variance"
# read in the optional masks
tide_util.logmem("before setting masks")
internalglobalmeanincludemask = None
internalglobalmeanexcludemask = None
internalrefineincludemask = None
internalrefineexcludemask = None
if optiondict["globalmeanincludename"] is not None:
LGR.info("constructing global mean include mask")
theglobalmeanincludemask = readamask(
optiondict["globalmeanincludename"],
nim_hdr,
xsize,
istext=optiondict["textio"],
valslist=optiondict["globalmeanincludevals"],
maskname="global mean include",
)
internalglobalmeanincludemask = theglobalmeanincludemask.reshape(numspatiallocs)
if tide_stats.getmasksize(internalglobalmeanincludemask) == 0:
raise ValueError(
"ERROR: there are no voxels in the global mean include mask - exiting"
)
if optiondict["globalmeanexcludename"] is not None:
LGR.info("constructing global mean exclude mask")
theglobalmeanexcludemask = readamask(
optiondict["globalmeanexcludename"],
nim_hdr,
xsize,
istext=optiondict["textio"],
valslist=optiondict["globalmeanexcludevals"],
maskname="global mean exclude",
)
internalglobalmeanexcludemask = theglobalmeanexcludemask.reshape(numspatiallocs)
if tide_stats.getmasksize(internalglobalmeanexcludemask) == numspatiallocs:
raise ValueError(
"ERROR: the global mean exclude mask does not leave any voxels - exiting"
)
if (internalglobalmeanincludemask is not None) and (internalglobalmeanexcludemask is not None):
if (
tide_stats.getmasksize(
internalglobalmeanincludemask * (1 - internalglobalmeanexcludemask)
)
== 0
):
raise ValueError(
"ERROR: the global mean include and exclude masks not leave any voxels between them - exiting"
)
if optiondict["refineincludename"] is not None:
LGR.info("constructing refine include mask")
therefineincludemask = readamask(
optiondict["refineincludename"],
nim_hdr,
xsize,
istext=optiondict["textio"],
valslist=optiondict["refineincludevals"],
maskname="refine include",
)
internalrefineincludemask = therefineincludemask.reshape(numspatiallocs)
if tide_stats.getmasksize(internalrefineincludemask) == 0:
raise ValueError("ERROR: there are no voxels in the refine include mask - exiting")
if optiondict["refineexcludename"] is not None:
LGR.info("constructing refine exclude mask")
therefineexcludemask = readamask(
optiondict["refineexcludename"],
nim_hdr,
xsize,
istext=optiondict["textio"],
valslist=optiondict["refineexcludevals"],
maskname="refine exclude",
)
internalrefineexcludemask = therefineexcludemask.reshape(numspatiallocs)
if tide_stats.getmasksize(internalrefineexcludemask) == numspatiallocs:
raise ValueError("ERROR: the refine exclude mask does not leave any voxels - exiting")
tide_util.logmem("after setting masks")
# read or make a mask of where to calculate the correlations
tide_util.logmem("before selecting valid voxels")
threshval = tide_stats.getfracvals(fmri_data[:, :], [0.98])[0] / 25.0
LGR.info("constructing correlation mask")
if optiondict["corrmaskincludename"] is not None:
thecorrmask = readamask(
optiondict["corrmaskincludename"],
nim_hdr,
xsize,
istext=optiondict["textio"],
valslist=optiondict["corrmaskincludevals"],
maskname="correlation",
)
corrmask = np.uint16(np.where(thecorrmask > 0, 1, 0).reshape(numspatiallocs))
else:
# check to see if the data has been demeaned
meanim = np.mean(fmri_data, axis=1)
stdim = np.std(fmri_data, axis=1)
if fileiscifti:
corrmask = np.uint(nim_data[:, 0] * 0 + 1)
else:
if np.mean(stdim) < np.mean(meanim):
LGR.info("generating correlation mask from mean image")
corrmask = np.uint16(masking.compute_epi_mask(nim).dataobj.reshape(numspatiallocs))
else:
LGR.info("generating correlation mask from std image")
corrmask = np.uint16(
tide_stats.makemask(stdim, threshpct=optiondict["corrmaskthreshpct"])
)
if tide_stats.getmasksize(corrmask) == 0:
raise ValueError("ERROR: there are no voxels in the correlation mask - exiting")
optiondict["corrmasksize"] = tide_stats.getmasksize(corrmask)
if internalrefineincludemask is not None:
if internalrefineexcludemask is not None:
if (
tide_stats.getmasksize(
corrmask * internalrefineincludemask * (1 - internalrefineexcludemask)
)
== 0
):
raise ValueError(
"ERROR: the refine include and exclude masks not leave any voxels in the corrmask - exiting"
)
else:
if tide_stats.getmasksize(corrmask * internalrefineincludemask) == 0:
raise ValueError(
"ERROR: the refine include mask does not leave any voxels in the corrmask - exiting"
)
else:
if internalrefineexcludemask is not None:
if tide_stats.getmasksize(corrmask * (1 - internalrefineexcludemask)) == 0:
raise ValueError(
"ERROR: the refine exclude mask does not leave any voxels in the corrmask - exiting"
)
if optiondict["nothresh"]:
corrmask *= 0
corrmask += 1
threshval = -10000000.0
if optiondict["savecorrmask"] and not (fileiscifti or optiondict["textio"]):
theheader = copy.deepcopy(nim_hdr)
theheader["dim"][0] = 3
theheader["dim"][4] = 1
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-processed_mask"
else:
savename = f"{outputname}_corrmask"
tide_io.savetonifti(corrmask.reshape(xsize, ysize, numslices), theheader, savename)
LGR.verbose(f"image threshval = {threshval}")
validvoxels = np.where(corrmask > 0)[0]
numvalidspatiallocs = np.shape(validvoxels)[0]
LGR.info(f"validvoxels shape = {numvalidspatiallocs}")
fmri_data_valid = fmri_data[validvoxels, :] + 0.0
LGR.info(f"original size = {np.shape(fmri_data)}, trimmed size = {np.shape(fmri_data_valid)}")
if internalglobalmeanincludemask is not None:
internalglobalmeanincludemask_valid = 1.0 * internalglobalmeanincludemask[validvoxels]
del internalglobalmeanincludemask
LGR.info(
"internalglobalmeanincludemask_valid has size: "
f"{internalglobalmeanincludemask_valid.size}"
)
else:
internalglobalmeanincludemask_valid = None
if internalglobalmeanexcludemask is not None:
internalglobalmeanexcludemask_valid = 1.0 * internalglobalmeanexcludemask[validvoxels]
del internalglobalmeanexcludemask
LGR.info(
"internalglobalmeanexcludemask_valid has size: "
f"{internalglobalmeanexcludemask_valid.size}"
)
else:
internalglobalmeanexcludemask_valid = None
if internalrefineincludemask is not None:
internalrefineincludemask_valid = 1.0 * internalrefineincludemask[validvoxels]
del internalrefineincludemask
LGR.info(
"internalrefineincludemask_valid has size: " f"{internalrefineincludemask_valid.size}"
)
else:
internalrefineincludemask_valid = None
if internalrefineexcludemask is not None:
internalrefineexcludemask_valid = 1.0 * internalrefineexcludemask[validvoxels]
del internalrefineexcludemask
LGR.info(
"internalrefineexcludemask_valid has size: " f"{internalrefineexcludemask_valid.size}"
)
else:
internalrefineexcludemask_valid = None
tide_util.logmem("after selecting valid voxels")
# move fmri_data_valid into shared memory
if optiondict["sharedmem"]:
LGR.info("moving fmri data to shared memory")
TimingLGR.info("Start moving fmri_data to shared memory")
numpy2shared_func = addmemprofiling(
numpy2shared, optiondict["memprofile"], "before fmri data move"
)
fmri_data_valid = numpy2shared_func(fmri_data_valid, rt_floatset)
TimingLGR.info("End moving fmri_data to shared memory")
# get rid of memory we aren't using
tide_util.logmem("before purging full sized fmri data")
meanvalue = np.mean(
nim_data.reshape((numspatiallocs, timepoints))[:, validstart : validend + 1],
axis=1,
)
del fmri_data
del nim_data
gc.collect()
tide_util.logmem("after purging full sized fmri data")
# filter out motion regressors here
if optiondict["motionfilename"] is not None:
LGR.info("regressing out motion")
TimingLGR.info("Motion filtering start")
(motionregressors, motionregressorlabels, fmri_data_valid,) = tide_glmpass.motionregress(
optiondict["motionfilename"],
fmri_data_valid,
fmritr,
motstart=validstart,
motend=validend + 1,
position=optiondict["mot_pos"],
deriv=optiondict["mot_deriv"],
derivdelayed=optiondict["mot_delayderiv"],
)
TimingLGR.info(
"Motion filtering end",
{
"message2": fmri_data_valid.shape[0],
"message3": "voxels",
},
)
if optiondict["bidsoutput"]:
tide_io.writebidstsv(
f"{outputname}_desc-orthogonalizedmotion_timeseries",
motionregressors,
1.0 / fmritr,
columns=motionregressorlabels,
append=True,
)
else:
tide_io.writenpvecs(motionregressors, f"{outputname}_orthogonalizedmotion.txt")
if optiondict["memprofile"]:
memcheckpoint("...done")
else:
tide_util.logmem("after motion glm filter")
if optiondict["savemotionfiltered"]:
outfmriarray = np.zeros((numspatiallocs, validtimepoints), dtype=rt_floattype)
outfmriarray[validvoxels, :] = fmri_data_valid[:, :]
if optiondict["textio"]:
tide_io.writenpvecs(
outfmriarray.reshape((numspatiallocs, validtimepoints)),
f"{outputname}_motionfiltered.txt",
)
else:
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-motionfiltered"
else:
savename = f"{outputname}_motionfiltered"
tide_io.savetonifti(
outfmriarray.reshape((xsize, ysize, numslices, validtimepoints)),
nim_hdr,
savename,
)
# read in the timecourse to resample
TimingLGR.info("Start of reference prep")
if filename is None:
LGR.info("no regressor file specified - will use the global mean regressor")
optiondict["useglobalref"] = True
else:
optiondict["useglobalref"] = False
# calculate the global mean whether we intend to use it or not
meanfreq = 1.0 / fmritr
meanperiod = 1.0 * fmritr
meanstarttime = 0.0
meanvec, meanmask = getglobalsignal(
fmri_data_valid,
optiondict,
includemask=internalglobalmeanincludemask_valid,
excludemask=internalglobalmeanexcludemask_valid,
pcacomponents=optiondict["globalpcacomponents"],
)
# now set the regressor that we'll use
if optiondict["useglobalref"]:
LGR.info("using global mean as probe regressor")
inputfreq = meanfreq
inputperiod = meanperiod
inputstarttime = meanstarttime
inputvec = meanvec
fullmeanmask = np.zeros(numspatiallocs, dtype=rt_floattype)
fullmeanmask[validvoxels] = meanmask[:]
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-globalmean_mask"
else:
savename = f"{outputname}_meanmask"
if fileiscifti:
theheader = copy.deepcopy(nim_hdr)
timeindex = theheader["dim"][0] - 1
spaceindex = theheader["dim"][0]
theheader["dim"][timeindex] = 1
theheader["dim"][spaceindex] = numspatiallocs
tide_io.savetocifti(
fullmeanmask,
cifti_hdr,
theheader,
savename,
isseries=False,
names=["meanmask"],
)
elif optiondict["textio"]:
tide_io.writenpvecs(
fullmeanmask,
savename + ".txt",
)
else:
theheader = copy.deepcopy(nim_hdr)
theheader["dim"][0] = 3
theheader["dim"][4] = 1
tide_io.savetonifti(
fullmeanmask.reshape((xsize, ysize, numslices)), theheader, savename
)
optiondict["preprocskip"] = 0
else:
LGR.info(f"using externally supplied probe regressor {filename}")
(
fileinputfreq,
filestarttime,
dummy,
inputvec,
dummy,
dummy,
) = tide_io.readvectorsfromtextfile(filename, onecol=True)
inputfreq = optiondict["inputfreq"]
inputstarttime = optiondict["inputstarttime"]
if inputfreq is None:
if fileinputfreq is not None:
inputfreq = fileinputfreq
else:
inputfreq = 1.0 / fmritr
LGR.warning(f"no regressor frequency specified - defaulting to {inputfreq} (1/tr)")
if inputstarttime is None:
if filestarttime is not None:
inputstarttime = filestarttime
else:
LGR.warning("no regressor start time specified - defaulting to 0.0")
inputstarttime = 0.0
inputperiod = 1.0 / inputfreq
numreference = len(inputvec)
optiondict["inputfreq"] = inputfreq
optiondict["inputstarttime"] = inputstarttime
LGR.info(
"Regressor start time, end time, and step: {:.3f}, {:.3f}, {:.3f}".format(
                -inputstarttime, -inputstarttime + numreference * inputperiod, inputperiod
)
)
LGR.verbose("Input vector")
LGR.verbose(f"length: {len(inputvec)}")
LGR.verbose(f"input freq: {inputfreq}")
LGR.verbose(f"input start time: {inputstarttime:.3f}")
if not optiondict["useglobalref"]:
globalcorrx, globalcorry, dummy, dummy = tide_corr.arbcorr(
meanvec, meanfreq, inputvec, inputfreq, start2=inputstarttime
)
synctime = globalcorrx[np.argmax(globalcorry)]
if optiondict["autosync"]:
optiondict["offsettime"] = -synctime
optiondict["offsettime_total"] = synctime
else:
synctime = 0.0
LGR.info(f"synctime is {synctime}")
reference_x = np.arange(0.0, numreference) * inputperiod - (
inputstarttime - optiondict["offsettime"]
)
LGR.info(f"total probe regressor offset is {inputstarttime + optiondict['offsettime']}")
# Print out initial information
LGR.verbose(f"there are {numreference} points in the original regressor")
LGR.verbose(f"the timepoint spacing is {1.0 / inputfreq}")
LGR.verbose(f"the input timecourse start time is {inputstarttime}")
# generate the time axes
fmrifreq = 1.0 / fmritr
optiondict["fmrifreq"] = fmrifreq
skiptime = fmritr * (optiondict["preprocskip"])
LGR.info(f"first fMRI point is at {skiptime} seconds relative to time origin")
initial_fmri_x = np.arange(0.0, validtimepoints) * fmritr + skiptime
os_fmri_x = (
np.arange(
0.0,
validtimepoints * optiondict["oversampfactor"] - (optiondict["oversampfactor"] - 1),
)
* oversamptr
+ skiptime
)
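    # sketch of the oversampled axis length (assumed values, not executed): with
    # validtimepoints = 100 and oversampfactor = 2, np.arange runs over
    # 100 * 2 - (2 - 1) = 199 points, each spaced by oversamptr and offset by
    # skiptime, i.e. samples at every half-TR without running past the last TR.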
LGR.verbose(f"os_fmri_x dim-0 shape: {np.shape(os_fmri_x)[0]}")
LGR.verbose(f"initial_fmri_x dim-0 shape: {np.shape(initial_fmri_x)[0]}")
# generate the comparison regressor from the input timecourse
# correct the output time points
# check for extrapolation
if os_fmri_x[0] < reference_x[0]:
LGR.warning(
f"WARNING: extrapolating {os_fmri_x[0] - reference_x[0]} "
"seconds of data at beginning of timecourse"
)
if os_fmri_x[-1] > reference_x[-1]:
LGR.warning(
f"WARNING: extrapolating {os_fmri_x[-1] - reference_x[-1]} "
"seconds of data at end of timecourse"
)
# invert the regressor if necessary
if optiondict["invertregressor"]:
invertfac = -1.0
else:
invertfac = 1.0
# detrend the regressor if necessary
if optiondict["detrendorder"] > 0:
reference_y = invertfac * tide_fit.detrend(
inputvec[0:numreference],
order=optiondict["detrendorder"],
demean=optiondict["dodemean"],
)
else:
reference_y = invertfac * (inputvec[0:numreference] - np.mean(inputvec[0:numreference]))
# write out the reference regressor prior to filtering
if optiondict["bidsoutput"]:
tide_io.writebidstsv(
f"{outputname}_desc-initialmovingregressor_timeseries",
reference_y,
inputfreq,
starttime=inputstarttime,
columns=["prefilt"],
append=False,
)
else:
tide_io.writenpvecs(reference_y, f"{outputname}_reference_origres_prefilt.txt")
# band limit the regressor if that is needed
LGR.info(f"filtering to {theprefilter.gettype()} band")
(
optiondict["lowerstop"],
optiondict["lowerpass"],
optiondict["upperpass"],
optiondict["upperstop"],
) = theprefilter.getfreqs()
reference_y_classfilter = theprefilter.apply(inputfreq, reference_y)
if optiondict["negativegradregressor"]:
reference_y = -np.gradient(reference_y_classfilter)
else:
reference_y = reference_y_classfilter
# write out the reference regressor used
if optiondict["bidsoutput"]:
tide_io.writebidstsv(
f"{outputname}_desc-initialmovingregressor_timeseries",
tide_math.stdnormalize(reference_y),
inputfreq,
starttime=inputstarttime,
columns=["postfilt"],
append=True,
)
else:
tide_io.writenpvecs(
tide_math.stdnormalize(reference_y), f"{outputname}_reference_origres.txt"
)
# filter the input data for antialiasing
if optiondict["antialias"]:
LGR.debug("applying trapezoidal antialiasing filter")
reference_y_filt = tide_filt.dolptrapfftfilt(
inputfreq,
0.25 * fmrifreq,
0.5 * fmrifreq,
reference_y,
padlen=int(inputfreq * optiondict["padseconds"]),
debug=optiondict["debug"],
)
reference_y = rt_floatset(reference_y_filt.real)
warnings.filterwarnings("ignore", "Casting*")
if optiondict["fakerun"]:
return
# generate the resampled reference regressors
oversampfreq = optiondict["oversampfactor"] / fmritr
if optiondict["detrendorder"] > 0:
resampnonosref_y = tide_fit.detrend(
tide_resample.doresample(
reference_x,
reference_y,
initial_fmri_x,
padlen=int(inputfreq * optiondict["padseconds"]),
method=optiondict["interptype"],
debug=optiondict["debug"],
),
order=optiondict["detrendorder"],
demean=optiondict["dodemean"],
)
resampref_y = tide_fit.detrend(
tide_resample.doresample(
reference_x,
reference_y,
os_fmri_x,
padlen=int(oversampfreq * optiondict["padseconds"]),
method=optiondict["interptype"],
debug=optiondict["debug"],
),
order=optiondict["detrendorder"],
demean=optiondict["dodemean"],
)
else:
resampnonosref_y = tide_resample.doresample(
reference_x,
reference_y,
initial_fmri_x,
padlen=int(inputfreq * optiondict["padseconds"]),
method=optiondict["interptype"],
)
resampref_y = tide_resample.doresample(
reference_x,
reference_y,
os_fmri_x,
padlen=int(oversampfreq * optiondict["padseconds"]),
method=optiondict["interptype"],
)
LGR.info(
f"{len(os_fmri_x)} "
f"{len(resampref_y)} "
f"{len(initial_fmri_x)} "
f"{len(resampnonosref_y)}"
)
previousnormoutputdata = resampnonosref_y + 0.0
# prepare the temporal mask
if optiondict["tmaskname"] is not None:
tmask_y = maketmask(optiondict["tmaskname"], reference_x, rt_floatset(reference_y))
tmaskos_y = tide_resample.doresample(
reference_x, tmask_y, os_fmri_x, method=optiondict["interptype"]
)
        # the temporal mask is written to the same file for BIDS and non-BIDS outputs
        tide_io.writenpvecs(tmask_y, f"{outputname}_temporalmask.txt")
resampnonosref_y *= tmask_y
thefit, R = tide_fit.mlregress(tmask_y, resampnonosref_y)
resampnonosref_y -= thefit[0, 1] * tmask_y
resampref_y *= tmaskos_y
thefit, R = tide_fit.mlregress(tmaskos_y, resampref_y)
resampref_y -= thefit[0, 1] * tmaskos_y
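        # the multiply zeroes the censored timepoints; the mlregress step then
        # removes the component of the regressor explained by the mask itself
        # (thefit[0, 1] is taken to be the fitted slope on the mask), so the
        # censoring does not leave a step offset in the reference.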
nonosrefname = "_reference_fmrires_pass1.txt"
osrefname = "_reference_resampres_pass1.txt"
(
optiondict["kurtosis_reference_pass1"],
optiondict["kurtosisz_reference_pass1"],
optiondict["kurtosisp_reference_pass1"],
) = tide_stats.kurtosisstats(resampref_y)
    if optiondict["bidsoutput"]:
        tide_io.writebidstsv(
            f"{outputname}_desc-movingregressor_timeseries",
            tide_math.stdnormalize(resampnonosref_y),
            1.0 / fmritr,
            columns=["pass1"],
            append=False,
        )
        tide_io.writebidstsv(
            f"{outputname}_desc-oversampledmovingregressor_timeseries",
            tide_math.stdnormalize(resampref_y),
            oversampfreq,
            columns=["pass1"],
            append=False,
        )
else:
tide_io.writenpvecs(tide_math.stdnormalize(resampnonosref_y), outputname + nonosrefname)
tide_io.writenpvecs(tide_math.stdnormalize(resampref_y), outputname + osrefname)
TimingLGR.info("End of reference prep")
corrtr = oversamptr
LGR.verbose(f"corrtr={corrtr}")
# initialize the Correlator
theCorrelator = tide_classes.Correlator(
Fs=oversampfreq,
ncprefilter=theprefilter,
negativegradient=optiondict["negativegradient"],
detrendorder=optiondict["detrendorder"],
windowfunc=optiondict["windowfunc"],
corrweighting=optiondict["corrweighting"],
corrpadding=optiondict["zeropadding"],
)
theCorrelator.setreftc(
np.zeros((optiondict["oversampfactor"] * validtimepoints), dtype=np.float64)
)
corrorigin = theCorrelator.similarityfuncorigin
dummy, corrscale, dummy = theCorrelator.getfunction(trim=False)
lagmininpts = int((-optiondict["lagmin"] / corrtr) - 0.5)
lagmaxinpts = int((optiondict["lagmax"] / corrtr) + 0.5)
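    # worked example (assumed values, not executed): with corrtr = 0.5 s,
    # lagmin = -5.0 s, and lagmax = 10.0 s,
    #     lagmininpts = int((5.0 / 0.5) - 0.5)   # = 9 points below zero lag
    #     lagmaxinpts = int((10.0 / 0.5) + 0.5)  # = 20 points above zero lag
    # so the fit searches 9 + 20 points of the similarity function around zero.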
if (lagmaxinpts + lagmininpts) < 3:
raise ValueError(
"correlation search range is too narrow - decrease lagmin, increase lagmax, or increase oversample factor"
)
theCorrelator.setlimits(lagmininpts, lagmaxinpts)
dummy, trimmedcorrscale, dummy = theCorrelator.getfunction()
# initialize the MutualInformationator
theMutualInformationator = tide_classes.MutualInformationator(
Fs=oversampfreq,
smoothingtime=optiondict["smoothingtime"],
ncprefilter=theprefilter,
negativegradient=optiondict["negativegradient"],
detrendorder=optiondict["detrendorder"],
windowfunc=optiondict["windowfunc"],
madnorm=False,
lagmininpts=lagmininpts,
lagmaxinpts=lagmaxinpts,
debug=optiondict["debug"],
)
theMutualInformationator.setreftc(
np.zeros((optiondict["oversampfactor"] * validtimepoints), dtype=np.float64)
)
nummilags = theMutualInformationator.similarityfunclen
theMutualInformationator.setlimits(lagmininpts, lagmaxinpts)
dummy, trimmedmiscale, dummy = theMutualInformationator.getfunction()
LGR.verbose(f"trimmedcorrscale length: {len(trimmedcorrscale)}")
LGR.verbose(f"trimmedmiscale length: {len(trimmedmiscale)} {nummilags}")
LGR.verbose(f"corrorigin at point {corrorigin} {corrscale[corrorigin]}")
LGR.verbose(
f"corr range from {corrorigin - lagmininpts} ({corrscale[corrorigin - lagmininpts]}) "
f"to {corrorigin + lagmaxinpts} ({corrscale[corrorigin + lagmaxinpts]})"
)
    if optiondict["savecorrtimes"]:
        # the time axes are written to the same files for BIDS and non-BIDS outputs
        tide_io.writenpvecs(trimmedcorrscale, f"{outputname}_corrtimes.txt")
        tide_io.writenpvecs(trimmedmiscale, f"{outputname}_mitimes.txt")
# allocate all of the data arrays
tide_util.logmem("before main array allocation")
if optiondict["textio"]:
nativespaceshape = xsize
else:
if fileiscifti:
nativespaceshape = (1, 1, 1, 1, numspatiallocs)
else:
nativespaceshape = (xsize, ysize, numslices)
internalspaceshape = numspatiallocs
internalvalidspaceshape = numvalidspatiallocs
meanval = np.zeros(internalvalidspaceshape, dtype=rt_floattype)
lagtimes = np.zeros(internalvalidspaceshape, dtype=rt_floattype)
lagstrengths = np.zeros(internalvalidspaceshape, dtype=rt_floattype)
lagsigma = np.zeros(internalvalidspaceshape, dtype=rt_floattype)
fitmask = np.zeros(internalvalidspaceshape, dtype="uint16")
failreason = np.zeros(internalvalidspaceshape, dtype="uint32")
R2 = np.zeros(internalvalidspaceshape, dtype=rt_floattype)
outmaparray = np.zeros(internalspaceshape, dtype=rt_floattype)
tide_util.logmem("after main array allocation")
corroutlen = np.shape(trimmedcorrscale)[0]
if optiondict["textio"]:
nativecorrshape = (xsize, corroutlen)
else:
if fileiscifti:
nativecorrshape = (1, 1, 1, corroutlen, numspatiallocs)
else:
nativecorrshape = (xsize, ysize, numslices, corroutlen)
internalcorrshape = (numspatiallocs, corroutlen)
internalvalidcorrshape = (numvalidspatiallocs, corroutlen)
LGR.info(
f"allocating memory for correlation arrays {internalcorrshape} {internalvalidcorrshape}"
)
if optiondict["sharedmem"]:
corrout, dummy, dummy = allocshared(internalvalidcorrshape, rt_floatset)
gaussout, dummy, dummy = allocshared(internalvalidcorrshape, rt_floatset)
windowout, dummy, dummy = allocshared(internalvalidcorrshape, rt_floatset)
outcorrarray, dummy, dummy = allocshared(internalcorrshape, rt_floatset)
else:
corrout = np.zeros(internalvalidcorrshape, dtype=rt_floattype)
gaussout = np.zeros(internalvalidcorrshape, dtype=rt_floattype)
windowout = np.zeros(internalvalidcorrshape, dtype=rt_floattype)
outcorrarray = np.zeros(internalcorrshape, dtype=rt_floattype)
tide_util.logmem("after correlation array allocation")
if optiondict["textio"]:
nativefmrishape = (xsize, np.shape(initial_fmri_x)[0])
else:
if fileiscifti:
nativefmrishape = (1, 1, 1, np.shape(initial_fmri_x)[0], numspatiallocs)
else:
nativefmrishape = (xsize, ysize, numslices, np.shape(initial_fmri_x)[0])
internalfmrishape = (numspatiallocs, np.shape(initial_fmri_x)[0])
internalvalidfmrishape = (numvalidspatiallocs, np.shape(initial_fmri_x)[0])
if optiondict["sharedmem"]:
lagtc, dummy, dummy = allocshared(internalvalidfmrishape, rt_floatset)
else:
lagtc = np.zeros(internalvalidfmrishape, dtype=rt_floattype)
tide_util.logmem("after lagtc array allocation")
if optiondict["passes"] > 1 or optiondict["convergencethresh"] is not None:
if optiondict["sharedmem"]:
shiftedtcs, dummy, dummy = allocshared(internalvalidfmrishape, rt_floatset)
weights, dummy, dummy = allocshared(internalvalidfmrishape, rt_floatset)
else:
shiftedtcs = np.zeros(internalvalidfmrishape, dtype=rt_floattype)
weights = np.zeros(internalvalidfmrishape, dtype=rt_floattype)
tide_util.logmem("after refinement array allocation")
if optiondict["sharedmem"]:
outfmriarray, dummy, dummy = allocshared(internalfmrishape, rt_floatset)
else:
outfmriarray = np.zeros(internalfmrishape, dtype=rt_floattype)
# prepare for fast resampling
padtime = (
max((-optiondict["lagmin"], optiondict["lagmax"]))
+ 30.0
+ np.abs(optiondict["offsettime"])
)
LGR.info(f"setting up fast resampling with padtime = {padtime}")
numpadtrs = int(padtime // fmritr)
padtime = fmritr * numpadtrs
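    # sketch of the rounding above (assumed values, not executed): a requested
    # padtime of 33.7 s with fmritr = 1.5 s gives numpadtrs = int(33.7 // 1.5) = 22,
    # so padtime is snapped down to 22 * 1.5 = 33.0 s, a whole number of TRs.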
genlagtc = tide_resample.FastResampler(reference_x, reference_y, padtime=padtime)
# cycle over all voxels
refine = True
LGR.verbose(f"refine is set to {refine}")
optiondict["edgebufferfrac"] = max(
[optiondict["edgebufferfrac"], 2.0 / np.shape(corrscale)[0]]
)
LGR.verbose(f"edgebufferfrac set to {optiondict['edgebufferfrac']}")
    # initialize the correlation fitter
thefitter = tide_classes.SimilarityFunctionFitter(
lagmod=optiondict["lagmod"],
lthreshval=optiondict["lthreshval"],
uthreshval=optiondict["uthreshval"],
bipolar=optiondict["bipolar"],
lagmin=optiondict["lagmin"],
lagmax=optiondict["lagmax"],
absmaxsigma=optiondict["absmaxsigma"],
absminsigma=optiondict["absminsigma"],
debug=optiondict["debug"],
peakfittype=optiondict["peakfittype"],
searchfrac=optiondict["searchfrac"],
enforcethresh=optiondict["enforcethresh"],
hardlimit=optiondict["hardlimit"],
)
# Preprocessing - echo cancellation
if optiondict["echocancel"]:
LGR.info("\n\nEcho cancellation")
TimingLGR.info("Echo cancellation start")
calcsimilaritypass_func = addmemprofiling(
tide_calcsimfunc.correlationpass,
optiondict["memprofile"],
"before correlationpass",
)
referencetc = tide_math.corrnormalize(
resampref_y,
detrendorder=optiondict["detrendorder"],
windowfunc=optiondict["windowfunc"],
)
(voxelsprocessed_echo, theglobalmaxlist, trimmedcorrscale,) = calcsimilaritypass_func(
fmri_data_valid[:, :],
referencetc,
theCorrelator,
initial_fmri_x,
os_fmri_x,
lagmininpts,
lagmaxinpts,
corrout,
meanval,
nprocs=optiondict["nprocs_calcsimilarity"],
alwaysmultiproc=optiondict["alwaysmultiproc"],
oversampfactor=optiondict["oversampfactor"],
interptype=optiondict["interptype"],
showprogressbar=optiondict["showprogressbar"],
chunksize=optiondict["mp_chunksize"],
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
for i in range(len(theglobalmaxlist)):
theglobalmaxlist[i] = corrscale[theglobalmaxlist[i]]
if optiondict["bidsoutput"]:
namesuffix = "_desc-globallag_hist"
else:
namesuffix = "_globallaghist_echocancel"
tide_stats.makeandsavehistogram(
np.asarray(theglobalmaxlist),
len(corrscale),
0,
outputname + namesuffix,
displaytitle="lagtime histogram",
therange=(corrscale[0], corrscale[-1]),
refine=False,
dictvarname="globallaghist_preechocancel",
saveasbids=optiondict["bidsoutput"],
append=False,
thedict=optiondict,
)
# Now find and regress out the echo
echooffset, echoratio = tide_stats.echoloc(np.asarray(theglobalmaxlist), len(corrscale))
LGR.info(f"Echooffset, echoratio: {echooffset} {echoratio}")
echoremovedtc, echofit, echoR = echocancel(
resampref_y, echooffset, oversamptr, outputname, numpadtrs
)
optiondict["echooffset"] = echooffset
optiondict["echoratio"] = echoratio
optiondict["echofit"] = [echofit[0, 0], echofit[0, 1]]
optiondict["echofitR"] = echoR
resampref_y = echoremovedtc
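        # hedged sketch of the step above (assumption, not the verified
        # implementation): echocancel() is taken to shift a copy of resampref_y
        # by echooffset seconds, fit that shifted copy to the original (echofit
        # holding the intercept and slope, echoR the fit correlation), and
        # return the residual, which then replaces the reference for later passes.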
TimingLGR.info(
"Echo cancellation calculation end",
{
"message2": voxelsprocessed_echo,
"message3": "voxels",
},
)
# --------------------- Main pass loop ---------------------
# loop over all passes
stoprefining = False
refinestopreason = "passesreached"
if optiondict["convergencethresh"] is None:
numpasses = optiondict["passes"]
else:
numpasses = np.max([optiondict["passes"], optiondict["maxpasses"]])
for thepass in range(1, numpasses + 1):
if stoprefining:
break
# initialize the pass
if optiondict["passes"] > 1:
LGR.info("\n\n*********************")
LGR.info(f"Pass number {thepass}")
referencetc = tide_math.corrnormalize(
resampref_y,
detrendorder=optiondict["detrendorder"],
windowfunc=optiondict["windowfunc"],
)
# Step -1 - check the regressor for periodic components in the passband
dolagmod = True
doreferencenotch = True
if optiondict["respdelete"]:
resptracker = tide_classes.FrequencyTracker(nperseg=64)
thetimes, thefreqs = resptracker.track(resampref_y, 1.0 / oversamptr)
            # identical output for BIDS and non-BIDS runs
            tide_io.writevec(thefreqs, f"{outputname}_peakfreaks_pass{thepass}.txt")
resampref_y = resptracker.clean(resampref_y, 1.0 / oversamptr, thetimes, thefreqs)
            tide_io.writevec(resampref_y, f"{outputname}_respfilt_pass{thepass}.txt")
referencetc = tide_math.corrnormalize(
resampref_y,
detrendorder=optiondict["detrendorder"],
windowfunc=optiondict["windowfunc"],
)
if optiondict["check_autocorrelation"]:
LGR.info("checking reference regressor autocorrelation properties")
optiondict["lagmod"] = 1000.0
lagindpad = corrorigin - 2 * np.max((lagmininpts, lagmaxinpts))
acmininpts = lagmininpts + lagindpad
acmaxinpts = lagmaxinpts + lagindpad
theCorrelator.setreftc(referencetc)
theCorrelator.setlimits(acmininpts, acmaxinpts)
thexcorr, accheckcorrscale, dummy = theCorrelator.run(resampref_y)
thefitter.setcorrtimeaxis(accheckcorrscale)
(
maxindex,
maxlag,
maxval,
acwidth,
maskval,
peakstart,
peakend,
thisfailreason,
) = tide_simfuncfit.onesimfuncfit(
thexcorr,
thefitter,
despeckle_thresh=optiondict["despeckle_thresh"],
lthreshval=optiondict["lthreshval"],
fixdelay=optiondict["fixdelay"],
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
outputarray = np.asarray([accheckcorrscale, thexcorr])
if optiondict["bidsoutput"]:
tide_io.writebidstsv(
f"{outputname}_desc-autocorr_timeseries",
thexcorr,
1.0 / (accheckcorrscale[1] - accheckcorrscale[0]),
starttime=accheckcorrscale[0],
columns=[f"pass{thepass}"],
append=(thepass > 1),
)
else:
tide_io.writenpvecs(
outputarray,
f"{outputname}_referenceautocorr_pass" + str(thepass) + ".txt",
)
thelagthresh = np.max((abs(optiondict["lagmin"]), abs(optiondict["lagmax"])))
theampthresh = 0.1
LGR.info(
f"searching for sidelobes with amplitude > {theampthresh} "
f"with abs(lag) < {thelagthresh} s"
)
sidelobetime, sidelobeamp = tide_corr.check_autocorrelation(
accheckcorrscale,
thexcorr,
acampthresh=theampthresh,
aclagthresh=thelagthresh,
detrendorder=optiondict["detrendorder"],
)
optiondict["acwidth"] = acwidth + 0.0
optiondict["absmaxsigma"] = acwidth * 10.0
passsuffix = "_pass" + str(thepass)
if sidelobetime is not None:
optiondict["acsidelobelag" + passsuffix] = sidelobetime
optiondict["despeckle_thresh"] = np.max(
[optiondict["despeckle_thresh"], sidelobetime / 2.0]
)
optiondict["acsidelobeamp" + passsuffix] = sidelobeamp
LGR.warning(
f"\n\nWARNING: check_autocorrelation found bad sidelobe at {sidelobetime} "
f"seconds ({1.0 / sidelobetime} Hz)..."
)
                tide_io.writenpvecs(
                    np.array([sidelobetime]),
                    f"{outputname}_autocorr_sidelobetime" + passsuffix + ".txt",
                )
if optiondict["fix_autocorrelation"]:
LGR.info("Removing sidelobe")
if dolagmod:
LGR.info("subjecting lag times to modulus")
optiondict["lagmod"] = sidelobetime / 2.0
if doreferencenotch:
LGR.info("removing spectral component at sidelobe frequency")
acstopfreq = 1.0 / sidelobetime
acfixfilter = tide_filt.NoncausalFilter(
transferfunc=optiondict["transferfunc"],
debug=optiondict["debug"],
)
acfixfilter.settype("arb_stop")
acfixfilter.setfreqs(
acstopfreq * 0.9,
acstopfreq * 0.95,
acstopfreq * 1.05,
acstopfreq * 1.1,
)
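                        # worked example (assumed values, not executed): a sidelobe
                        # at sidelobetime = 6.0 s gives acstopfreq ~ 0.167 Hz, so
                        # the arb_stop band rejects roughly 0.158-0.175 Hz, with
                        # transition edges at 0.150 and 0.183 Hz.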
cleaned_resampref_y = tide_math.corrnormalize(
acfixfilter.apply(1.0 / oversamptr, resampref_y),
windowfunc="None",
detrendorder=optiondict["detrendorder"],
)
cleaned_referencetc = tide_math.corrnormalize(
cleaned_resampref_y,
detrendorder=optiondict["detrendorder"],
windowfunc=optiondict["windowfunc"],
)
cleaned_nonosreferencetc = tide_math.stdnormalize(
acfixfilter.apply(fmrifreq, resampnonosref_y)
)
if optiondict["bidsoutput"]:
tide_io.writebidstsv(
f"{outputname}_desc-cleanedreferencefmrires_info",
cleaned_nonosreferencetc,
fmrifreq,
columns=[f"pass{thepass}"],
append=(thepass > 1),
)
tide_io.writebidstsv(
f"{outputname}_desc-cleanedreference_info",
cleaned_referencetc,
1.0 / oversamptr,
columns=[f"pass{thepass}"],
append=(thepass > 1),
)
tide_io.writebidstsv(
f"{outputname}_desc-cleanedresamprefy_info",
cleaned_resampref_y,
1.0 / oversamptr,
columns=[f"pass{thepass}"],
append=(thepass > 1),
)
else:
tide_io.writenpvecs(
cleaned_nonosreferencetc,
f"{outputname}_cleanedreference_fmrires_pass{thepass}.txt",
)
tide_io.writenpvecs(
cleaned_referencetc,
f"{outputname}_cleanedreference_pass{thepass}.txt",
)
tide_io.writenpvecs(
cleaned_resampref_y,
f"{outputname}_cleanedresampref_y_pass{thepass}.txt",
)
else:
cleaned_resampref_y = 1.0 * tide_math.corrnormalize(
resampref_y,
windowfunc="None",
detrendorder=optiondict["detrendorder"],
)
cleaned_referencetc = 1.0 * referencetc
cleaned_nonosreferencetc = 1.0 * resampnonosref_y
else:
LGR.info("no sidelobes found in range")
cleaned_resampref_y = 1.0 * tide_math.corrnormalize(
resampref_y,
windowfunc="None",
detrendorder=optiondict["detrendorder"],
)
cleaned_referencetc = 1.0 * referencetc
cleaned_nonosreferencetc = 1.0 * resampnonosref_y
else:
cleaned_resampref_y = 1.0 * tide_math.corrnormalize(
resampref_y, windowfunc="None", detrendorder=optiondict["detrendorder"]
)
cleaned_referencetc = 1.0 * referencetc
cleaned_nonosreferencetc = 1.0 * resampnonosref_y
# Step 0 - estimate significance
if optiondict["numestreps"] > 0:
TimingLGR.info(f"Significance estimation start, pass {thepass}")
LGR.info(f"\n\nSignificance estimation, pass {thepass}")
LGR.verbose(
"calling getNullDistributionData with args: "
f"{oversampfreq} {fmritr} {corrorigin} {lagmininpts} {lagmaxinpts}"
)
getNullDistributionData_func = addmemprofiling(
tide_nullsimfunc.getNullDistributionDatax,
optiondict["memprofile"],
"before getnulldistristributiondata",
)
            if optiondict["checkpoint"]:
                # checkpoint files are written identically for BIDS and non-BIDS runs
                tide_io.writenpvecs(
                    cleaned_referencetc,
                    f"{outputname}_cleanedreference_pass" + str(thepass) + ".txt",
                )
                tide_io.writenpvecs(
                    cleaned_resampref_y,
                    f"{outputname}_cleanedresampref_y_pass" + str(thepass) + ".txt",
                )
tide_io.writedicttojson(
optiondict,
f"{outputname}_options_pregetnull_pass" + str(thepass) + ".json",
)
theCorrelator.setlimits(lagmininpts, lagmaxinpts)
theCorrelator.setreftc(cleaned_resampref_y)
theMutualInformationator.setlimits(lagmininpts, lagmaxinpts)
theMutualInformationator.setreftc(cleaned_resampref_y)
dummy, trimmedcorrscale, dummy = theCorrelator.getfunction()
thefitter.setcorrtimeaxis(trimmedcorrscale)
corrdistdata = getNullDistributionData_func(
cleaned_resampref_y,
oversampfreq,
theCorrelator,
thefitter,
numestreps=optiondict["numestreps"],
nprocs=optiondict["nprocs_getNullDist"],
alwaysmultiproc=optiondict["alwaysmultiproc"],
showprogressbar=optiondict["showprogressbar"],
chunksize=optiondict["mp_chunksize"],
permutationmethod=optiondict["permutationmethod"],
fixdelay=optiondict["fixdelay"],
fixeddelayvalue=optiondict["fixeddelayvalue"],
rt_floatset=np.float64,
rt_floattype="float64",
)
if optiondict["bidsoutput"]:
tide_io.writebidstsv(
f"{outputname}_desc-corrdistdata_info",
corrdistdata,
1.0,
columns=["pass" + str(thepass)],
append=(thepass > 1),
)
else:
tide_io.writenpvecs(
corrdistdata,
f"{outputname}_corrdistdata_pass" + str(thepass) + ".txt",
)
# calculate percentiles for the crosscorrelation from the distribution data
thepercentiles = np.array([0.95, 0.99, 0.995, 0.999])
thepvalnames = []
for thispercentile in thepercentiles:
thepvalnames.append("{:.3f}".format(1.0 - thispercentile).replace(".", "p"))
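            # example of the naming scheme (not executed): 0.95 -> "0p050",
            # 0.99 -> "0p010", 0.995 -> "0p005", 0.999 -> "0p001"; each
            # percentile is recorded by its p-value with "." replaced by "p".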
pcts, pcts_fit, sigfit = tide_stats.sigFromDistributionData(
corrdistdata,
optiondict["sighistlen"],
thepercentiles,
twotail=optiondict["bipolar"],
nozero=optiondict["nohistzero"],
dosighistfit=optiondict["dosighistfit"],
)
for i in range(len(thepvalnames)):
optiondict[
"p_lt_" + thepvalnames[i] + "_pass" + str(thepass) + "_thresh.txt"
] = pcts[i]
if optiondict["dosighistfit"]:
optiondict[
"p_lt_" + thepvalnames[i] + "_pass" + str(thepass) + "_fitthresh"
] = pcts_fit[i]
optiondict["sigfit"] = sigfit
if optiondict["ampthreshfromsig"]:
if pcts is not None:
LGR.info(
f"setting ampthresh to the p < {1.0 - thepercentiles[0]:.3f} threshhold"
)
optiondict["ampthresh"] = pcts[0]
tide_stats.printthresholds(
pcts,
thepercentiles,
"Crosscorrelation significance thresholds from data:",
)
if optiondict["dosighistfit"]:
tide_stats.printthresholds(
pcts_fit,
thepercentiles,
"Crosscorrelation significance thresholds from fit:",
)
if optiondict["bidsoutput"]:
namesuffix = "_desc-nullsimfunc_hist"
else:
namesuffix = "_nullsimfunchist_pass" + str(thepass)
tide_stats.makeandsavehistogram(
corrdistdata,
optiondict["sighistlen"],
0,
outputname + namesuffix,
                        displaytitle="Null correlation histogram, pass " + str(thepass),
refine=False,
dictvarname="nullsimfunchist_pass" + str(thepass),
saveasbids=optiondict["bidsoutput"],
therange=(0.0, 1.0),
append=(thepass > 1),
thedict=optiondict,
)
else:
LGR.info("leaving ampthresh unchanged")
del corrdistdata
TimingLGR.info(
f"Significance estimation end, pass {thepass}",
{
"message2": optiondict["numestreps"],
"message3": "repetitions",
},
)
# Step 1 - Correlation step
if optiondict["similaritymetric"] == "mutualinfo":
similaritytype = "Mutual information"
elif optiondict["similaritymetric"] == "correlation":
similaritytype = "Correlation"
else:
similaritytype = "MI enhanced correlation"
LGR.info(f"\n\n{similaritytype} calculation, pass {thepass}")
TimingLGR.info(f"{similaritytype} calculation start, pass {thepass}")
calcsimilaritypass_func = addmemprofiling(
tide_calcsimfunc.correlationpass,
optiondict["memprofile"],
"before correlationpass",
)
if optiondict["similaritymetric"] == "mutualinfo":
theMutualInformationator.setlimits(lagmininpts, lagmaxinpts)
(voxelsprocessed_cp, theglobalmaxlist, trimmedcorrscale,) = calcsimilaritypass_func(
fmri_data_valid[:, :],
cleaned_referencetc,
theMutualInformationator,
initial_fmri_x,
os_fmri_x,
lagmininpts,
lagmaxinpts,
corrout,
meanval,
nprocs=optiondict["nprocs_calcsimilarity"],
alwaysmultiproc=optiondict["alwaysmultiproc"],
oversampfactor=optiondict["oversampfactor"],
interptype=optiondict["interptype"],
showprogressbar=optiondict["showprogressbar"],
chunksize=optiondict["mp_chunksize"],
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
else:
(voxelsprocessed_cp, theglobalmaxlist, trimmedcorrscale,) = calcsimilaritypass_func(
fmri_data_valid[:, :],
cleaned_referencetc,
theCorrelator,
initial_fmri_x,
os_fmri_x,
lagmininpts,
lagmaxinpts,
corrout,
meanval,
nprocs=optiondict["nprocs_calcsimilarity"],
alwaysmultiproc=optiondict["alwaysmultiproc"],
oversampfactor=optiondict["oversampfactor"],
interptype=optiondict["interptype"],
showprogressbar=optiondict["showprogressbar"],
chunksize=optiondict["mp_chunksize"],
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
for i in range(len(theglobalmaxlist)):
theglobalmaxlist[i] = corrscale[theglobalmaxlist[i]]
if optiondict["bidsoutput"]:
namesuffix = "_desc-globallag_hist"
else:
namesuffix = "_globallaghist_pass" + str(thepass)
tide_stats.makeandsavehistogram(
np.asarray(theglobalmaxlist),
len(corrscale),
0,
outputname + namesuffix,
displaytitle="lagtime histogram",
therange=(corrscale[0], corrscale[-1]),
refine=False,
dictvarname="globallaghist_pass" + str(thepass),
saveasbids=optiondict["bidsoutput"],
append=(optiondict["echocancel"] or (thepass > 1)),
thedict=optiondict,
)
if optiondict["checkpoint"]:
outcorrarray[:, :] = 0.0
outcorrarray[validvoxels, :] = corrout[:, :]
if optiondict["textio"]:
tide_io.writenpvecs(
outcorrarray.reshape(nativecorrshape),
f"{outputname}_corrout_prefit_pass" + str(thepass) + ".txt",
)
else:
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-corroutprefit_pass-" + str(thepass)
else:
savename = f"{outputname}_corrout_prefit_pass" + str(thepass)
tide_io.savetonifti(outcorrarray.reshape(nativecorrshape), theheader, savename)
TimingLGR.info(
f"{similaritytype} calculation end, pass {thepass}",
{
"message2": voxelsprocessed_cp,
"message3": "voxels",
},
)
# Step 1b. Do a peak prefit
if optiondict["similaritymetric"] == "hybrid":
LGR.info(f"\n\nPeak prefit calculation, pass {thepass}")
TimingLGR.info(f"Peak prefit calculation start, pass {thepass}")
peakevalpass_func = addmemprofiling(
tide_peakeval.peakevalpass,
optiondict["memprofile"],
"before peakevalpass",
)
voxelsprocessed_pe, thepeakdict = peakevalpass_func(
fmri_data_valid[:, :],
cleaned_referencetc,
initial_fmri_x,
os_fmri_x,
theMutualInformationator,
trimmedcorrscale,
corrout,
nprocs=optiondict["nprocs_peakeval"],
alwaysmultiproc=optiondict["alwaysmultiproc"],
bipolar=optiondict["bipolar"],
oversampfactor=optiondict["oversampfactor"],
interptype=optiondict["interptype"],
showprogressbar=optiondict["showprogressbar"],
chunksize=optiondict["mp_chunksize"],
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
TimingLGR.info(
f"Peak prefit end, pass {thepass}",
{
"message2": voxelsprocessed_pe,
"message3": "voxels",
},
)
mipeaks = lagtimes * 0.0
for i in range(numvalidspatiallocs):
if len(thepeakdict[str(i)]) > 0:
mipeaks[i] = thepeakdict[str(i)][0][0]
else:
thepeakdict = None
# Step 2 - similarity function fitting and time lag estimation
LGR.info(f"\n\nTime lag estimation pass {thepass}")
TimingLGR.info(f"Time lag estimation start, pass {thepass}")
fitcorr_func = addmemprofiling(
tide_simfuncfit.fitcorr, optiondict["memprofile"], "before fitcorr"
)
thefitter.setfunctype(optiondict["similaritymetric"])
thefitter.setcorrtimeaxis(trimmedcorrscale)
# use initial lags if this is a hybrid fit
if optiondict["similaritymetric"] == "hybrid" and thepeakdict is not None:
initlags = mipeaks
else:
initlags = None
voxelsprocessed_fc = fitcorr_func(
genlagtc,
initial_fmri_x,
lagtc,
trimmedcorrscale,
thefitter,
corrout,
fitmask,
failreason,
lagtimes,
lagstrengths,
lagsigma,
gaussout,
windowout,
R2,
peakdict=thepeakdict,
nprocs=optiondict["nprocs_fitcorr"],
alwaysmultiproc=optiondict["alwaysmultiproc"],
fixdelay=optiondict["fixdelay"],
showprogressbar=optiondict["showprogressbar"],
chunksize=optiondict["mp_chunksize"],
despeckle_thresh=optiondict["despeckle_thresh"],
initiallags=initlags,
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
TimingLGR.info(
f"Time lag estimation end, pass {thepass}",
{
"message2": voxelsprocessed_fc,
"message3": "voxels",
},
)
# Step 2b - Correlation time despeckle
if optiondict["despeckle_passes"] > 0:
LGR.info(f"\n\nCorrelation despeckling pass {thepass}")
LGR.info(f"\tUsing despeckle_thresh = {optiondict['despeckle_thresh']:.3f}")
TimingLGR.info(f"Correlation despeckle start, pass {thepass}")
# find lags that are very different from their neighbors, and refit starting at the median lag for the point
voxelsprocessed_fc_ds = 0
despecklingdone = False
for despecklepass in range(optiondict["despeckle_passes"]):
LGR.info(f"\n\nCorrelation despeckling subpass {despecklepass + 1}")
outmaparray *= 0.0
                outmaparray[validvoxels] = lagtimes[:]
medianlags = ndimage.median_filter(
outmaparray.reshape(nativespaceshape), 3
).reshape(numspatiallocs)
initlags = np.where(
np.abs(outmaparray - medianlags) > optiondict["despeckle_thresh"],
medianlags,
-1000000.0,
)[validvoxels]
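                # despeckle sketch (assumed values, not executed): a voxel with
                # lag 8.2 s whose 3x3x3 neighborhood median is 1.1 s exceeds a
                # despeckle_thresh of 5.0 s, so it is refit seeded at 1.1 s;
                # voxels within threshold get the sentinel -1000000.0 and are
                # skipped by the refit below.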
if len(initlags) > 0:
if len(np.where(initlags != -1000000.0)[0]) > 0:
voxelsprocessed_thispass = fitcorr_func(
genlagtc,
initial_fmri_x,
lagtc,
trimmedcorrscale,
thefitter,
corrout,
fitmask,
failreason,
lagtimes,
lagstrengths,
lagsigma,
gaussout,
windowout,
R2,
peakdict=thepeakdict,
nprocs=optiondict["nprocs_fitcorr"],
alwaysmultiproc=optiondict["alwaysmultiproc"],
fixdelay=optiondict["fixdelay"],
showprogressbar=optiondict["showprogressbar"],
chunksize=optiondict["mp_chunksize"],
despeckle_thresh=optiondict["despeckle_thresh"],
initiallags=initlags,
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
voxelsprocessed_fc_ds += voxelsprocessed_thispass
optiondict[
"despecklemasksize_pass" + str(thepass) + "_d" + str(despecklepass + 1)
] = voxelsprocessed_thispass
optiondict[
"despecklemaskpct_pass" + str(thepass) + "_d" + str(despecklepass + 1)
] = (100.0 * voxelsprocessed_thispass / optiondict["corrmasksize"])
else:
despecklingdone = True
else:
despecklingdone = True
if despecklingdone:
LGR.info("Nothing left to do! Terminating despeckling")
break
if optiondict["savedespecklemasks"] and thepass == optiondict["passes"]:
theheader = copy.deepcopy(nim_hdr)
theheader["dim"][4] = 1
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-despeckle_mask"
else:
savename = f"{outputname}_despecklemask"
if not fileiscifti:
theheader["dim"][0] = 3
tide_io.savetonifti(
(
np.where(
np.abs(outmaparray - medianlags) > optiondict["despeckle_thresh"],
medianlags,
0.0,
)
).reshape(nativespaceshape),
theheader,
savename,
)
else:
timeindex = theheader["dim"][0] - 1
spaceindex = theheader["dim"][0]
theheader["dim"][timeindex] = 1
theheader["dim"][spaceindex] = numspatiallocs
tide_io.savetocifti(
(
np.where(
np.abs(outmaparray - medianlags) > optiondict["despeckle_thresh"],
medianlags,
0.0,
)
),
cifti_hdr,
theheader,
savename,
isseries=False,
names=["despecklemask"],
)
LGR.info(
f"\n\n{voxelsprocessed_fc_ds} voxels despeckled in "
f"{optiondict['despeckle_passes']} passes"
)
TimingLGR.info(
f"Correlation despeckle end, pass {thepass}",
{
"message2": voxelsprocessed_fc_ds,
"message3": "voxels",
},
)
# Step 3 - regressor refinement for next pass
if thepass < optiondict["passes"] or optiondict["convergencethresh"] is not None:
LGR.info(f"\n\nRegressor refinement, pass {thepass}")
TimingLGR.info(f"Regressor refinement start, pass {thepass}")
if optiondict["refineoffset"]:
peaklag, peakheight, peakwidth = tide_stats.gethistprops(
lagtimes[np.where(fitmask > 0)],
optiondict["histlen"],
pickleft=optiondict["pickleft"],
peakthresh=optiondict["pickleftthresh"],
)
optiondict["offsettime"] = peaklag
optiondict["offsettime_total"] += peaklag
LGR.info(
f"offset time set to {optiondict['offsettime']:.3f}, "
f"total is {optiondict['offsettime_total']:.3f}"
)
# regenerate regressor for next pass
refineregressor_func = addmemprofiling(
tide_refine.refineregressor,
optiondict["memprofile"],
"before refineregressor",
)
(
voxelsprocessed_rr,
outputdata,
refinemask,
locationfails,
ampfails,
lagfails,
sigmafails,
) = refineregressor_func(
fmri_data_valid,
fmritr,
shiftedtcs,
weights,
thepass,
lagstrengths,
lagtimes,
lagsigma,
fitmask,
R2,
theprefilter,
optiondict,
bipolar=optiondict["bipolar"],
padtrs=numpadtrs,
includemask=internalrefineincludemask_valid,
excludemask=internalrefineexcludemask_valid,
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
optiondict["refinemasksize_pass" + str(thepass)] = voxelsprocessed_rr
optiondict["refinemaskpct_pass" + str(thepass)] = (
100.0 * voxelsprocessed_rr / optiondict["corrmasksize"]
)
optiondict["refinelocationfails_pass" + str(thepass)] = locationfails
optiondict["refineampfails_pass" + str(thepass)] = ampfails
optiondict["refinelagfails_pass" + str(thepass)] = lagfails
optiondict["refinesigmafails_pass" + str(thepass)] = sigmafails
if voxelsprocessed_rr > 0:
normoutputdata = tide_math.stdnormalize(theprefilter.apply(fmrifreq, outputdata))
normunfilteredoutputdata = tide_math.stdnormalize(outputdata)
if optiondict["bidsoutput"]:
tide_io.writebidstsv(
f"{outputname}_desc-refinedmovingregressor_timeseries",
normunfilteredoutputdata,
1.0 / fmritr,
columns=["unfiltered_pass" + str(thepass)],
append=(thepass > 1),
)
tide_io.writebidstsv(
f"{outputname}_desc-refinedmovingregressor_timeseries",
normoutputdata,
1.0 / fmritr,
columns=["filtered_pass" + str(thepass)],
append=True,
)
else:
tide_io.writenpvecs(
normoutputdata,
f"{outputname}_refinedregressor_pass" + str(thepass) + ".txt",
)
tide_io.writenpvecs(
normunfilteredoutputdata,
f"{outputname}_unfilteredrefinedregressor_pass" + str(thepass) + ".txt",
)
# check for convergence
regressormse = mse(normoutputdata, previousnormoutputdata)
optiondict["regressormse_pass" + str(thepass).zfill(2)] = regressormse
LGR.info(f"regressor difference at end of pass {thepass:d} is {regressormse:.6f}")
if optiondict["convergencethresh"] is not None:
if thepass >= optiondict["maxpasses"]:
LGR.info("refinement ended (maxpasses reached)")
stoprefining = True
refinestopreason = "maxpassesreached"
elif regressormse < optiondict["convergencethresh"]:
LGR.info("refinement ended (refinement has converged")
stoprefining = True
refinestopreason = "convergence"
else:
stoprefining = False
elif thepass >= optiondict["passes"]:
stoprefining = True
refinestopreason = "passesreached"
else:
stoprefining = False
if optiondict["detrendorder"] > 0:
resampnonosref_y = tide_fit.detrend(
tide_resample.doresample(
initial_fmri_x,
normoutputdata,
initial_fmri_x,
method=optiondict["interptype"],
),
order=optiondict["detrendorder"],
demean=optiondict["dodemean"],
)
resampref_y = tide_fit.detrend(
tide_resample.doresample(
initial_fmri_x,
normoutputdata,
os_fmri_x,
method=optiondict["interptype"],
),
order=optiondict["detrendorder"],
demean=optiondict["dodemean"],
)
else:
resampnonosref_y = tide_resample.doresample(
initial_fmri_x,
normoutputdata,
initial_fmri_x,
method=optiondict["interptype"],
)
resampref_y = tide_resample.doresample(
initial_fmri_x,
normoutputdata,
os_fmri_x,
method=optiondict["interptype"],
)
if optiondict["tmaskname"] is not None:
resampnonosref_y *= tmask_y
thefit, R = tide_fit.mlregress(tmask_y, resampnonosref_y)
resampnonosref_y -= thefit[0, 1] * tmask_y
resampref_y *= tmaskos_y
thefit, R = tide_fit.mlregress(tmaskos_y, resampref_y)
resampref_y -= thefit[0, 1] * tmaskos_y
# reinitialize lagtc for resampling
previousnormoutputdata = normoutputdata + 0.0
genlagtc = tide_resample.FastResampler(
initial_fmri_x, normoutputdata, padtime=padtime
)
nonosrefname = "_reference_fmrires_pass" + str(thepass + 1) + ".txt"
osrefname = "_reference_resampres_pass" + str(thepass + 1) + ".txt"
(
optiondict["kurtosis_reference_pass" + str(thepass + 1)],
optiondict["kurtosisz_reference_pass" + str(thepass + 1)],
optiondict["kurtosisp_reference_pass" + str(thepass + 1)],
) = tide_stats.kurtosisstats(resampref_y)
if not stoprefining:
if optiondict["bidsoutput"]:
tide_io.writebidstsv(
f"{outputname}_desc-movingregressor_timeseries",
tide_math.stdnormalize(resampnonosref_y),
1.0 / fmritr,
columns=["pass" + str(thepass + 1)],
append=True,
)
tide_io.writebidstsv(
f"{outputname}_desc-oversampledmovingregressor_timeseries",
tide_math.stdnormalize(resampref_y),
oversampfreq,
columns=["pass" + str(thepass + 1)],
append=True,
)
else:
tide_io.writenpvecs(
tide_math.stdnormalize(resampnonosref_y),
outputname + nonosrefname,
)
tide_io.writenpvecs(
tide_math.stdnormalize(resampref_y), outputname + osrefname
)
else:
LGR.warning(f"refinement failed - terminating at end of pass {thepass}")
stoprefining = True
refinestopreason = "emptymask"
TimingLGR.info(
f"Regressor refinement end, pass {thepass}",
{
"message2": voxelsprocessed_rr,
"message3": "voxels",
},
)
if optiondict["saveintermediatemaps"]:
maplist = [
("lagtimes", "maxtime"),
("lagstrengths", "maxcorr"),
("lagsigma", "maxwidth"),
("fitmask", "fitmask"),
("failreason", "corrfitfailreason"),
]
if thepass < optiondict["passes"]:
maplist.append(("refinemask", "refinemask"))
for mapname, mapsuffix in maplist:
if optiondict["memprofile"]:
memcheckpoint(f"about to write {mapname} to {mapsuffix}")
else:
tide_util.logmem(f"about to write {mapname} to {mapsuffix}")
outmaparray[:] = 0.0
outmaparray[validvoxels] = eval(mapname)[:]
if optiondict["textio"]:
tide_io.writenpvecs(
outmaparray.reshape(nativespaceshape, 1),
f"{outputname}_{mapsuffix}{passsuffix}.txt",
)
else:
if optiondict["bidsoutput"]:
bidspasssuffix = f"_intermediatedata-pass{thepass}"
if mapname == "fitmask":
savename = f"{outputname}{bidspasssuffix}_desc-corrfit_mask"
elif mapname == "failreason":
savename = f"{outputname}{bidspasssuffix}_desc-corrfitfailreason_info"
else:
savename = f"{outputname}{bidspasssuffix}_desc-{mapsuffix}_map"
bidsdict = bidsbasedict.copy()
if mapname == "lagtimes" or mapname == "lagsigma":
bidsdict["Units"] = "second"
tide_io.writedicttojson(bidsdict, f"{savename}.json")
else:
savename = f"{outputname}_{mapname}" + passsuffix
tide_io.savetonifti(outmaparray.reshape(nativespaceshape), theheader, savename)
# We are done with refinement.
if optiondict["convergencethresh"] is None:
optiondict["actual_passes"] = optiondict["passes"]
else:
optiondict["actual_passes"] = thepass - 1
optiondict["refinestopreason"] = refinestopreason
# Post refinement step -1 - Coherence calculation
if optiondict["calccoherence"]:
TimingLGR.info("Coherence calculation start")
LGR.info("\n\nCoherence calculation")
reportstep = 1000
# make the Coherer
theCoherer = tide_classes.Coherer(
Fs=(1.0 / fmritr),
reftc=cleaned_nonosreferencetc,
freqmin=0.0,
freqmax=0.2,
ncprefilter=theprefilter,
windowfunc=optiondict["windowfunc"],
detrendorder=optiondict["detrendorder"],
debug=False,
)
theCoherer.setreftc(cleaned_nonosreferencetc)
(
coherencefreqstart,
dummy,
coherencefreqstep,
coherencefreqaxissize,
) = theCoherer.getaxisinfo()
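        # the quantity computed per voxel is assumed to be the standard
        # magnitude-squared coherence,
        #     Cxy(f) = |Pxy(f)|**2 / (Pxx(f) * Pyy(f)),
        # evaluated against the cleaned reference over 0.0-0.2 Hz, with the
        # peak value and peak frequency recorded separately.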
if optiondict["textio"]:
nativecoherenceshape = (xsize, coherencefreqaxissize)
else:
if fileiscifti:
nativecoherenceshape = (1, 1, 1, coherencefreqaxissize, numspatiallocs)
else:
nativecoherenceshape = (xsize, ysize, numslices, coherencefreqaxissize)
internalvalidcoherenceshape = (numvalidspatiallocs, coherencefreqaxissize)
internalcoherenceshape = (numspatiallocs, coherencefreqaxissize)
# now allocate the arrays needed for the coherence calculation
if optiondict["sharedmem"]:
coherencefunc, dummy, dummy = allocshared(internalvalidcoherenceshape, rt_outfloatset)
coherencepeakval, dummy, dummy = allocshared(numvalidspatiallocs, rt_outfloatset)
coherencepeakfreq, dummy, dummy = allocshared(numvalidspatiallocs, rt_outfloatset)
else:
coherencefunc = np.zeros(internalvalidcoherenceshape, dtype=rt_outfloattype)
            coherencepeakval = np.zeros(numvalidspatiallocs, dtype=rt_outfloattype)
coherencepeakfreq = np.zeros(numvalidspatiallocs, dtype=rt_outfloattype)
coherencepass_func = addmemprofiling(
tide_calccoherence.coherencepass,
optiondict["memprofile"],
"before coherencepass",
)
voxelsprocessed_coherence = coherencepass_func(
fmri_data_valid,
theCoherer,
coherencefunc,
coherencepeakval,
coherencepeakfreq,
reportstep,
alt=True,
showprogressbar=optiondict["showprogressbar"],
chunksize=optiondict["mp_chunksize"],
nprocs=1,
alwaysmultiproc=False,
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
# save the results of the calculations
outcoherencearray = np.zeros(internalcoherenceshape, dtype=rt_floattype)
outcoherencearray[validvoxels, :] = coherencefunc[:, :]
theheader = copy.deepcopy(nim_hdr)
theheader["toffset"] = coherencefreqstart
theheader["pixdim"][4] = coherencefreqstep
if optiondict["textio"]:
tide_io.writenpvecs(
outcoherencearray.reshape(nativecoherenceshape),
f"{outputname}_coherence.txt",
)
else:
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-coherence_info"
else:
savename = f"{outputname}_coherence"
if fileiscifti:
timeindex = theheader["dim"][0] - 1
spaceindex = theheader["dim"][0]
theheader["dim"][timeindex] = coherencefreqaxissize
theheader["dim"][spaceindex] = numspatiallocs
tide_io.savetocifti(
outcoherencearray,
cifti_hdr,
theheader,
savename,
isseries=True,
names=["coherence"],
)
else:
theheader["dim"][0] = 3
theheader["dim"][4] = coherencefreqaxissize
tide_io.savetonifti(
outcoherencearray.reshape(nativecoherenceshape), theheader, savename
)
del coherencefunc
del outcoherencearray
TimingLGR.info(
"Coherence calculation end",
{
"message2": voxelsprocessed_coherence,
"message3": "voxels",
},
)
# Post refinement step 0 - Wiener deconvolution
if optiondict["dodeconv"]:
TimingLGR.info("Wiener deconvolution start")
LGR.info("\n\nWiener deconvolution")
reportstep = 1000
# now allocate the arrays needed for Wiener deconvolution
if optiondict["sharedmem"]:
wienerdeconv, dummy, dummy = allocshared(internalvalidspaceshape, rt_outfloatset)
wpeak, dummy, dummy = allocshared(internalvalidspaceshape, rt_outfloatset)
else:
wienerdeconv = np.zeros(internalvalidspaceshape, dtype=rt_outfloattype)
wpeak = np.zeros(internalvalidspaceshape, dtype=rt_outfloattype)
wienerpass_func = addmemprofiling(
tide_wiener.wienerpass,
optiondict["memprofile"],
"before wienerpass",
)
voxelsprocessed_wiener = wienerpass_func(
numspatiallocs,
reportstep,
fmri_data_valid,
threshval,
optiondict,
wienerdeconv,
wpeak,
resampref_y,
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
TimingLGR.info(
"Wiener deconvolution end",
{
"message2": voxelsprocessed_wiener,
"message3": "voxels",
},
)
# Post refinement step 1 - GLM fitting to remove moving signal
if optiondict["doglmfilt"]:
TimingLGR.info("GLM filtering start")
LGR.info("\n\nGLM filtering")
reportstep = 1000
if (optiondict["gausssigma"] > 0.0) or (optiondict["glmsourcefile"] is not None):
if optiondict["glmsourcefile"] is not None:
LGR.info(f"reading in {optiondict['glmsourcefile']} for GLM filter, please wait")
if optiondict["textio"]:
nim_data = tide_io.readvecs(optiondict["glmsourcefile"])
else:
nim, nim_data, nim_hdr, thedims, thesizes = tide_io.readfromnifti(
optiondict["glmsourcefile"]
)
else:
LGR.info(f"rereading {fmrifilename} for GLM filter, please wait")
if optiondict["textio"]:
nim_data = tide_io.readvecs(fmrifilename)
else:
nim, nim_data, nim_hdr, thedims, thesizes = tide_io.readfromnifti(fmrifilename)
"""meanvalue = np.mean(
nim_data.reshape((numspatiallocs, timepoints))[:, validstart : validend + 1],
axis=1,
)"""
fmri_data_valid = (
nim_data.reshape((numspatiallocs, timepoints))[:, validstart : validend + 1]
)[validvoxels, :] + 0.0
# move fmri_data_valid into shared memory
if optiondict["sharedmem"]:
LGR.info("moving fmri data to shared memory")
TimingLGR.info("Start moving fmri_data to shared memory")
numpy2shared_func = addmemprofiling(
numpy2shared,
optiondict["memprofile"],
"before movetoshared (glm)",
)
fmri_data_valid = numpy2shared_func(fmri_data_valid, rt_floatset)
TimingLGR.info("End moving fmri_data to shared memory")
del nim_data
# now allocate the arrays needed for GLM filtering
if optiondict["sharedmem"]:
glmmean, dummy, dummy = allocshared(internalvalidspaceshape, rt_outfloatset)
rvalue, dummy, dummy = allocshared(internalvalidspaceshape, rt_outfloatset)
r2value, dummy, dummy = allocshared(internalvalidspaceshape, rt_outfloatset)
fitNorm, dummy, dummy = allocshared(internalvalidspaceshape, rt_outfloatset)
fitcoeff, dummy, dummy = allocshared(internalvalidspaceshape, rt_outfloatset)
movingsignal, dummy, dummy = allocshared(internalvalidfmrishape, rt_outfloatset)
filtereddata, dummy, dummy = allocshared(internalvalidfmrishape, rt_outfloatset)
else:
glmmean = np.zeros(internalvalidspaceshape, dtype=rt_outfloattype)
rvalue = np.zeros(internalvalidspaceshape, dtype=rt_outfloattype)
r2value = np.zeros(internalvalidspaceshape, dtype=rt_outfloattype)
fitNorm = np.zeros(internalvalidspaceshape, dtype=rt_outfloattype)
fitcoeff = np.zeros(internalvalidspaceshape, dtype=rt_outfloattype)
movingsignal = np.zeros(internalvalidfmrishape, dtype=rt_outfloattype)
filtereddata = np.zeros(internalvalidfmrishape, dtype=rt_outfloattype)
if optiondict["memprofile"]:
memcheckpoint("about to start glm noise removal...")
else:
tide_util.logmem("before glm")
if optiondict["preservefiltering"]:
for i in range(len(validvoxels)):
fmri_data_valid[i] = theprefilter.apply(optiondict["fmrifreq"], fmri_data_valid[i])
glmpass_func = addmemprofiling(
tide_glmpass.glmpass, optiondict["memprofile"], "before glmpass"
)
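        # hedged sketch of the fit performed below (assumption, not the verified
        # implementation): for each voxel v, glmpass() is taken to regress the
        # delayed reference lagtc[v] against fmri_data_valid[v], roughly
        #     filtereddata[v] = fmri_data_valid[v] - glmmean[v] - fitcoeff[v] * lagtc[v]
        # with rvalue/r2value recording the fit quality and movingsignal the
        # fitted component that is removed.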
voxelsprocessed_glm = glmpass_func(
numvalidspatiallocs,
fmri_data_valid,
threshval,
lagtc,
glmmean,
rvalue,
r2value,
fitcoeff,
fitNorm,
movingsignal,
filtereddata,
reportstep=reportstep,
nprocs=optiondict["nprocs_glm"],
alwaysmultiproc=optiondict["alwaysmultiproc"],
showprogressbar=optiondict["showprogressbar"],
mp_chunksize=optiondict["mp_chunksize"],
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
del fmri_data_valid
TimingLGR.info(
"GLM filtering end",
{
"message2": voxelsprocessed_glm,
"message3": "voxels",
},
)
if optiondict["memprofile"]:
memcheckpoint("...done")
else:
tide_util.logmem("after glm filter")
LGR.info("")
else:
# get the original data to calculate the mean
LGR.info(f"rereading {fmrifilename} to calculate mean value, please wait")
if optiondict["textio"]:
nim_data = tide_io.readvecs(fmrifilename)
else:
nim, nim_data, nim_hdr, thedims, thesizes = tide_io.readfromnifti(fmrifilename)
"""meanvalue = np.mean(
nim_data.reshape((numspatiallocs, timepoints))[:, validstart : validend + 1], axis=1
)"""
# Post refinement step 2 - make and save interesting histograms
TimingLGR.info("Start saving histograms")
if optiondict["bidsoutput"]:
namesuffix = "_desc-maxtime_hist"
else:
namesuffix = "_laghist"
tide_stats.makeandsavehistogram(
lagtimes[np.where(fitmask > 0)],
optiondict["histlen"],
0,
outputname + namesuffix,
displaytitle="lagtime histogram",
refine=False,
dictvarname="laghist",
saveasbids=optiondict["bidsoutput"],
thedict=optiondict,
)
if optiondict["bidsoutput"]:
namesuffix = "_desc-maxcorr_hist"
else:
namesuffix = "_strengthhist"
tide_stats.makeandsavehistogram(
lagstrengths[np.where(fitmask > 0)],
optiondict["histlen"],
0,
outputname + namesuffix,
displaytitle="lagstrength histogram",
therange=(0.0, 1.0),
dictvarname="strengthhist",
saveasbids=optiondict["bidsoutput"],
thedict=optiondict,
)
if optiondict["bidsoutput"]:
namesuffix = "_desc-maxwidth_hist"
else:
namesuffix = "_widthhist"
tide_stats.makeandsavehistogram(
lagsigma[np.where(fitmask > 0)],
optiondict["histlen"],
1,
outputname + namesuffix,
displaytitle="lagsigma histogram",
dictvarname="widthhist",
saveasbids=optiondict["bidsoutput"],
thedict=optiondict,
)
if optiondict["bidsoutput"]:
namesuffix = "_desc-maxcorrsq_hist"
else:
namesuffix = "_R2hist"
if optiondict["doglmfilt"]:
tide_stats.makeandsavehistogram(
r2value[np.where(fitmask > 0)],
optiondict["histlen"],
1,
outputname + namesuffix,
displaytitle="correlation R2 histogram",
dictvarname="R2hist",
saveasbids=optiondict["bidsoutput"],
thedict=optiondict,
)
TimingLGR.info("Finished saving histograms")
# put some quality metrics into the info structure
histpcts = [0.02, 0.25, 0.5, 0.75, 0.98]
thetimepcts = tide_stats.getfracvals(lagtimes[np.where(fitmask > 0)], histpcts, nozero=False)
thestrengthpcts = tide_stats.getfracvals(
lagstrengths[np.where(fitmask > 0)], histpcts, nozero=False
)
thesigmapcts = tide_stats.getfracvals(lagsigma[np.where(fitmask > 0)], histpcts, nozero=False)
for i in range(len(histpcts)):
optiondict[
"lagtimes_" + str(int(np.round(100 * histpcts[i], 0))).zfill(2) + "pct"
] = thetimepcts[i]
optiondict[
"lagstrengths_" + str(int(np.round(100 * histpcts[i], 0))).zfill(2) + "pct"
] = thestrengthpcts[i]
optiondict[
"lagsigma_" + str(int(np.round(100 * histpcts[i], 0))).zfill(2) + "pct"
] = thesigmapcts[i]
optiondict["fitmasksize"] = np.sum(fitmask)
optiondict["fitmaskpct"] = 100.0 * optiondict["fitmasksize"] / optiondict["corrmasksize"]
# Post refinement step 3 - save out all of the important arrays to nifti files
# write out the options used
tide_io.writedicttojson(optiondict, f"{outputname}_options.json")
# do ones with one time point first
TimingLGR.info("Start saving maps")
if not optiondict["textio"]:
theheader = copy.deepcopy(nim_hdr)
if fileiscifti:
timeindex = theheader["dim"][0] - 1
spaceindex = theheader["dim"][0]
theheader["dim"][timeindex] = 1
theheader["dim"][spaceindex] = numspatiallocs
else:
theheader["dim"][0] = 3
theheader["dim"][4] = 1
# Prepare extra maps
savelist = [
("lagtimes", "maxtime"),
("lagstrengths", "maxcorr"),
("lagsigma", "maxwidth"),
("R2", "maxcorrsq"),
("fitmask", "fitmask"),
("failreason", "corrfitfailreason"),
]
MTT = np.square(lagsigma) - (optiondict["acwidth"] * optiondict["acwidth"])
MTT = np.where(MTT > 0.0, MTT, 0.0)
MTT = np.sqrt(MTT)
savelist += [("MTT", "MTT")]
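    # the MTT map above is a quadrature subtraction: the fitted peak width
    # includes the intrinsic autocorrelation width of the regressor, so
    #     MTT = sqrt(max(lagsigma**2 - acwidth**2, 0))
    # e.g. lagsigma = 5.0 s with acwidth = 3.0 s yields MTT = 4.0 s.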
if optiondict["calccoherence"]:
savelist += [
("coherencepeakval", "coherencepeakval"),
("coherencepeakfreq", "coherencepeakfreq"),
]
# if optiondict["similaritymetric"] == "mutualinfo":
# savelist += [("baseline", "baseline"), ("baselinedev", "baselinedev")]
for mapname, mapsuffix in savelist:
        if optiondict["memprofile"]:
            memcheckpoint(f"about to write {mapname} to {mapsuffix}")
        else:
            tide_util.logmem(f"about to write {mapname} to {mapsuffix}")
outmaparray[:] = 0.0
outmaparray[validvoxels] = eval(mapname)[:]
if optiondict["textio"]:
tide_io.writenpvecs(
outmaparray.reshape(nativespaceshape, 1),
f"{outputname}_" + mapsuffix + ".txt",
)
else:
if optiondict["bidsoutput"]:
if mapname == "fitmask":
savename = f"{outputname}_desc-corrfit_mask"
elif mapname == "failreason":
savename = f"{outputname}_desc-corrfitfailreason_info"
else:
savename = f"{outputname}_desc-" + mapsuffix + "_map"
bidsdict = bidsbasedict.copy()
if mapname == "lagtimes" or mapname == "lagsigma" or mapname == "MTT":
bidsdict["Units"] = "second"
tide_io.writedicttojson(bidsdict, savename + ".json")
else:
savename = f"{outputname}_{mapname}"
if not fileiscifti:
tide_io.savetonifti(outmaparray.reshape(nativespaceshape), theheader, savename)
else:
tide_io.savetocifti(
outmaparray,
cifti_hdr,
theheader,
savename,
isseries=False,
names=[mapsuffix],
)
if optiondict["doglmfilt"]:
for mapname, mapsuffix in [
("rvalue", "lfofilterR"),
("r2value", "lfofilterR2"),
("glmmean", "lfofilterMean"),
("fitcoeff", "lfofilterCoeff"),
("fitNorm", "lfofilterNorm"),
]:
if optiondict["memprofile"]:
memcheckpoint(f"about to write {mapname}")
else:
tide_util.logmem(f"about to write {mapname}")
outmaparray[:] = 0.0
outmaparray[validvoxels] = eval(mapname)[:]
if optiondict["textio"]:
tide_io.writenpvecs(
outmaparray.reshape(nativespaceshape),
f"{outputname}_" + mapsuffix + ".txt",
)
else:
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-" + mapsuffix + "_map"
bidsdict = bidsbasedict.copy()
tide_io.writedicttojson(bidsdict, savename + ".json")
else:
savename = f"{outputname}_{mapname}"
if not fileiscifti:
tide_io.savetonifti(outmaparray.reshape(nativespaceshape), theheader, savename)
else:
tide_io.savetocifti(
outmaparray,
cifti_hdr,
theheader,
savename,
isseries=False,
names=[mapsuffix],
)
del rvalue
del r2value
del fitcoeff
del fitNorm
for mapname, mapsuffix in [("meanvalue", "mean")]:
if optiondict["memprofile"]:
memcheckpoint(f"about to write {mapname}")
else:
tide_util.logmem(f"about to write {mapname}")
outmaparray[:] = 0.0
outmaparray[:] = eval(mapname)[:]
if optiondict["textio"]:
tide_io.writenpvecs(
outmaparray.reshape(nativespaceshape),
f"{outputname}_" + mapsuffix + ".txt",
)
else:
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-" + mapsuffix + "_map"
bidsdict = bidsbasedict.copy()
tide_io.writedicttojson(bidsdict, savename + ".json")
else:
savename = f"{outputname}_{mapname}"
if not fileiscifti:
tide_io.savetonifti(outmaparray.reshape(nativespaceshape), theheader, savename)
else:
tide_io.savetocifti(
outmaparray,
cifti_hdr,
theheader,
savename,
isseries=False,
names=[mapsuffix],
)
del meanvalue
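    # Write binary significance masks, thresholding |lag strength| at each estimated p-value cutoff.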
if optiondict["numestreps"] > 0:
for i in range(0, len(thepercentiles)):
pmask = np.where(np.abs(lagstrengths) > pcts[i], fitmask, 0 * fitmask)
outmaparray[:] = 0.0
outmaparray[validvoxels] = pmask[:]
if optiondict["textio"]:
tide_io.writenpvecs(
outmaparray.reshape(nativespaceshape),
f"{outputname}_p_lt_" + thepvalnames[i] + "_mask.txt",
)
else:
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-plt" + thepvalnames[i] + "_mask"
else:
savename = f"{outputname}_p_lt_" + thepvalnames[i] + "_mask"
if not fileiscifti:
tide_io.savetonifti(outmaparray.reshape(nativespaceshape), theheader, savename)
else:
tide_io.savetocifti(
outmaparray,
cifti_hdr,
theheader,
savename,
isseries=False,
names=["p_lt_" + thepvalnames[i] + "_mask"],
)
if optiondict["passes"] > 1 and optiondict["refinestopreason"] != "emptymask":
outmaparray[:] = 0.0
outmaparray[validvoxels] = refinemask[:]
if optiondict["textio"]:
tide_io.writenpvecs(
                outmaparray.reshape(nativespaceshape), f"{outputname}_refinemask.txt"
)
else:
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-refine_mask"
else:
savename = f"{outputname}_refinemask"
if not fileiscifti:
tide_io.savetonifti(outmaparray.reshape(nativespaceshape), theheader, savename)
else:
tide_io.savetocifti(
outmaparray,
cifti_hdr,
theheader,
savename,
isseries=False,
names=["refinemask"],
)
del refinemask
# clean up arrays that will no longer be needed
del lagtimes
del lagstrengths
del lagsigma
del R2
del fitmask
# now do the ones with other numbers of time points
if not optiondict["textio"]:
theheader = copy.deepcopy(nim_hdr)
theheader["toffset"] = corrscale[corrorigin - lagmininpts]
if fileiscifti:
timeindex = theheader["dim"][0] - 1
spaceindex = theheader["dim"][0]
theheader["dim"][timeindex] = np.shape(outcorrarray)[1]
theheader["dim"][spaceindex] = numspatiallocs
else:
theheader["dim"][4] = np.shape(outcorrarray)[1]
theheader["pixdim"][4] = corrtr
outcorrarray[:, :] = 0.0
outcorrarray[validvoxels, :] = gaussout[:, :]
if optiondict["textio"]:
tide_io.writenpvecs(outcorrarray.reshape(nativecorrshape), f"{outputname}_gaussout.txt")
else:
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-gaussout_info"
else:
savename = f"{outputname}_gaussout"
if not fileiscifti:
tide_io.savetonifti(outcorrarray.reshape(nativecorrshape), theheader, savename)
else:
tide_io.savetocifti(
outcorrarray,
cifti_hdr,
theheader,
savename,
isseries=True,
start=theheader["toffset"],
step=corrtr,
)
del gaussout
outcorrarray[:, :] = 0.0
outcorrarray[validvoxels, :] = windowout[:, :]
if optiondict["textio"]:
tide_io.writenpvecs(outcorrarray.reshape(nativecorrshape), f"{outputname}_windowout.txt")
else:
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-corrfitwindow_info"
else:
savename = f"{outputname}_windowout"
if not fileiscifti:
tide_io.savetonifti(outcorrarray.reshape(nativecorrshape), theheader, savename)
else:
tide_io.savetocifti(
outcorrarray,
cifti_hdr,
theheader,
savename,
isseries=True,
start=theheader["toffset"],
step=corrtr,
)
del windowout
outcorrarray[:, :] = 0.0
outcorrarray[validvoxels, :] = corrout[:, :]
if optiondict["textio"]:
tide_io.writenpvecs(outcorrarray.reshape(nativecorrshape), f"{outputname}_corrout.txt")
else:
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-corrout_info"
else:
savename = f"{outputname}_corrout"
if not fileiscifti:
tide_io.savetonifti(outcorrarray.reshape(nativecorrshape), theheader, savename)
else:
tide_io.savetocifti(
outcorrarray,
cifti_hdr,
theheader,
savename,
isseries=True,
start=theheader["toffset"],
step=corrtr,
)
del corrout
if not optiondict["textio"]:
theheader = copy.deepcopy(nim_hdr)
if fileiscifti:
timeindex = theheader["dim"][0] - 1
spaceindex = theheader["dim"][0]
theheader["dim"][timeindex] = np.shape(outfmriarray)[1]
theheader["dim"][spaceindex] = numspatiallocs
else:
theheader["dim"][4] = | np.shape(outfmriarray) | numpy.shape |
import numpy as np
import pytest
from experimentator import yaml
from tests.test_design import make_heterogeneous_tree
@pytest.mark.parametrize('data', [
    np.random.randn(5),
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
UCCSD with spatial integrals
'''
import time
from functools import reduce
import numpy as np
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import rccsd
from pyscf.ao2mo import _ao2mo
from pyscf.mp import ump2
from pyscf import scf
from pyscf import __config__
MEMORYMIN = getattr(__config__, 'cc_ccsd_memorymin', 2000)
# This is unrestricted (U)CCSD, in spatial-orbital form.
def update_amps(cc, t1, t2, eris):
    time0 = time.process_time(), time.perf_counter()
log = logger.Logger(cc.stdout, cc.verbose)
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
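    # Spin-label convention: lowercase orbital labels are alpha, uppercase are beta
    # (ovov = alpha-alpha, OVOV = beta-beta, ovOV = mixed integrals).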
nocca, noccb, nvira, nvirb = t2ab.shape
mo_ea_o = eris.mo_energy[0][:nocca]
mo_ea_v = eris.mo_energy[0][nocca:] + cc.level_shift
mo_eb_o = eris.mo_energy[1][:noccb]
mo_eb_v = eris.mo_energy[1][noccb:] + cc.level_shift
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
u1a = np.zeros_like(t1a)
u1b = np.zeros_like(t1b)
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:u2aa += lib.einsum('ijef,aebf->ijab', tauaa, eris_vvvv) * .5
#:u2bb += lib.einsum('ijef,aebf->ijab', taubb, eris_VVVV) * .5
#:u2ab += lib.einsum('iJeF,aeBF->iJaB', tauab, eris_vvVV)
tauaa, tauab, taubb = make_tau(t2, t1, t1)
u2aa, u2ab, u2bb = cc._add_vvvv(None, (tauaa,tauab,taubb), eris)
u2aa *= .5
u2bb *= .5
Fooa = .5 * lib.einsum('me,ie->mi', fova, t1a)
Foob = .5 * lib.einsum('me,ie->mi', fovb, t1b)
Fvva = -.5 * lib.einsum('me,ma->ae', fova, t1a)
Fvvb = -.5 * lib.einsum('me,ma->ae', fovb, t1b)
Fooa += eris.focka[:nocca,:nocca] - np.diag(mo_ea_o)
Foob += eris.fockb[:noccb,:noccb] - np.diag(mo_eb_o)
Fvva += eris.focka[nocca:,nocca:] - np.diag(mo_ea_v)
Fvvb += eris.fockb[noccb:,noccb:] - np.diag(mo_eb_v)
dtype = u2aa.dtype
wovvo = np.zeros((nocca,nvira,nvira,nocca), dtype=dtype)
wOVVO = np.zeros((noccb,nvirb,nvirb,noccb), dtype=dtype)
woVvO = np.zeros((nocca,nvirb,nvira,noccb), dtype=dtype)
woVVo = np.zeros((nocca,nvirb,nvirb,nocca), dtype=dtype)
wOvVo = np.zeros((noccb,nvira,nvirb,nocca), dtype=dtype)
wOvvO = np.zeros((noccb,nvira,nvira,noccb), dtype=dtype)
mem_now = lib.current_memory()[0]
max_memory = max(0, cc.max_memory - mem_now - u2aa.size*8e-6)
if nvira > 0 and nocca > 0:
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3+1)))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
ovvv = ovvv - ovvv.transpose(0,3,2,1)
Fvva += np.einsum('mf,mfae->ae', t1a[p0:p1], ovvv)
wovvo[p0:p1] += lib.einsum('jf,mebf->mbej', t1a, ovvv)
u1a += 0.5*lib.einsum('mief,meaf->ia', t2aa[p0:p1], ovvv)
u2aa[:,p0:p1] += lib.einsum('ie,mbea->imab', t1a, ovvv.conj())
tmp1aa = lib.einsum('ijef,mebf->ijmb', tauaa, ovvv)
u2aa -= lib.einsum('ijmb,ma->ijab', tmp1aa, t1a[p0:p1]*.5)
ovvv = tmp1aa = None
if nvirb > 0 and noccb > 0:
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3+1)))
for p0,p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
OVVV = OVVV - OVVV.transpose(0,3,2,1)
Fvvb += np.einsum('mf,mfae->ae', t1b[p0:p1], OVVV)
wOVVO[p0:p1] = lib.einsum('jf,mebf->mbej', t1b, OVVV)
u1b += 0.5*lib.einsum('MIEF,MEAF->IA', t2bb[p0:p1], OVVV)
u2bb[:,p0:p1] += lib.einsum('ie,mbea->imab', t1b, OVVV.conj())
tmp1bb = lib.einsum('ijef,mebf->ijmb', taubb, OVVV)
u2bb -= lib.einsum('ijmb,ma->ijab', tmp1bb, t1b[p0:p1]*.5)
OVVV = tmp1bb = None
if nvirb > 0 and nocca > 0:
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3+1)))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Fvvb += np.einsum('mf,mfAE->AE', t1a[p0:p1], ovVV)
woVvO[p0:p1] = lib.einsum('JF,meBF->mBeJ', t1b, ovVV)
woVVo[p0:p1] = lib.einsum('jf,mfBE->mBEj',-t1a, ovVV)
u1b += lib.einsum('mIeF,meAF->IA', t2ab[p0:p1], ovVV)
u2ab[p0:p1] += lib.einsum('IE,maEB->mIaB', t1b, ovVV.conj())
tmp1ab = lib.einsum('iJeF,meBF->iJmB', tauab, ovVV)
u2ab -= lib.einsum('iJmB,ma->iJaB', tmp1ab, t1a[p0:p1])
ovVV = tmp1ab = None
if nvira > 0 and noccb > 0:
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3+1)))
for p0,p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Fvva += np.einsum('MF,MFae->ae', t1b[p0:p1], OVvv)
wOvVo[p0:p1] = lib.einsum('jf,MEbf->MbEj', t1a, OVvv)
wOvvO[p0:p1] = lib.einsum('JF,MFbe->MbeJ',-t1b, OVvv)
u1a += lib.einsum('iMfE,MEaf->ia', t2ab[:,p0:p1], OVvv)
u2ab[:,p0:p1] += lib.einsum('ie,MBea->iMaB', t1a, OVvv.conj())
tmp1abba = lib.einsum('iJeF,MFbe->iJbM', tauab, OVvv)
u2ab -= lib.einsum('iJbM,MA->iJbA', tmp1abba, t1b[p0:p1])
OVvv = tmp1abba = None
eris_ovov = np.asarray(eris.ovov)
eris_ovoo = np.asarray(eris.ovoo)
Woooo = lib.einsum('je,nemi->mnij', t1a, eris_ovoo)
Woooo = Woooo - Woooo.transpose(0,1,3,2)
Woooo += np.asarray(eris.oooo).transpose(0,2,1,3)
Woooo += lib.einsum('ijef,menf->mnij', tauaa, eris_ovov) * .5
u2aa += lib.einsum('mnab,mnij->ijab', tauaa, Woooo*.5)
Woooo = tauaa = None
ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
Fooa += np.einsum('ne,nemi->mi', t1a, ovoo)
u1a += 0.5*lib.einsum('mnae,meni->ia', t2aa, ovoo)
wovvo += lib.einsum('nb,nemj->mbej', t1a, ovoo)
ovoo = eris_ovoo = None
tilaa = make_tau_aa(t2[0], t1a, t1a, fac=0.5)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
Fvva -= .5 * lib.einsum('mnaf,menf->ae', tilaa, ovov)
Fooa += .5 * lib.einsum('inef,menf->mi', tilaa, ovov)
Fova = np.einsum('nf,menf->me',t1a, ovov)
u2aa += ovov.conj().transpose(0,2,1,3) * .5
wovvo -= 0.5*lib.einsum('jnfb,menf->mbej', t2aa, ovov)
woVvO += 0.5*lib.einsum('nJfB,menf->mBeJ', t2ab, ovov)
tmpaa = lib.einsum('jf,menf->mnej', t1a, ovov)
wovvo -= lib.einsum('nb,mnej->mbej', t1a, tmpaa)
eirs_ovov = ovov = tmpaa = tilaa = None
eris_OVOV = np.asarray(eris.OVOV)
eris_OVOO = np.asarray(eris.OVOO)
WOOOO = lib.einsum('je,nemi->mnij', t1b, eris_OVOO)
WOOOO = WOOOO - WOOOO.transpose(0,1,3,2)
WOOOO += np.asarray(eris.OOOO).transpose(0,2,1,3)
WOOOO += lib.einsum('ijef,menf->mnij', taubb, eris_OVOV) * .5
u2bb += lib.einsum('mnab,mnij->ijab', taubb, WOOOO*.5)
WOOOO = taubb = None
OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
Foob += np.einsum('ne,nemi->mi', t1b, OVOO)
u1b += 0.5*lib.einsum('mnae,meni->ia', t2bb, OVOO)
wOVVO += lib.einsum('nb,nemj->mbej', t1b, OVOO)
OVOO = eris_OVOO = None
tilbb = make_tau_aa(t2[2], t1b, t1b, fac=0.5)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
Fvvb -= .5 * lib.einsum('MNAF,MENF->AE', tilbb, OVOV)
Foob += .5 * lib.einsum('inef,menf->mi', tilbb, OVOV)
Fovb = np.einsum('nf,menf->me',t1b, OVOV)
u2bb += OVOV.conj().transpose(0,2,1,3) * .5
wOVVO -= 0.5*lib.einsum('jnfb,menf->mbej', t2bb, OVOV)
wOvVo += 0.5*lib.einsum('jNbF,MENF->MbEj', t2ab, OVOV)
tmpbb = lib.einsum('jf,menf->mnej', t1b, OVOV)
wOVVO -= lib.einsum('nb,mnej->mbej', t1b, tmpbb)
eris_OVOV = OVOV = tmpbb = tilbb = None
eris_OVoo = np.asarray(eris.OVoo)
eris_ovOO = np.asarray(eris.ovOO)
Fooa += np.einsum('NE,NEmi->mi', t1b, eris_OVoo)
u1a -= lib.einsum('nMaE,MEni->ia', t2ab, eris_OVoo)
wOvVo -= lib.einsum('nb,MEnj->MbEj', t1a, eris_OVoo)
woVVo += lib.einsum('NB,NEmj->mBEj', t1b, eris_OVoo)
Foob += np.einsum('ne,neMI->MI', t1a, eris_ovOO)
u1b -= lib.einsum('mNeA,meNI->IA', t2ab, eris_ovOO)
woVvO -= lib.einsum('NB,meNJ->mBeJ', t1b, eris_ovOO)
wOvvO += lib.einsum('nb,neMJ->MbeJ', t1a, eris_ovOO)
WoOoO = lib.einsum('JE,NEmi->mNiJ', t1b, eris_OVoo)
WoOoO+= lib.einsum('je,neMI->nMjI', t1a, eris_ovOO)
WoOoO += np.asarray(eris.ooOO).transpose(0,2,1,3)
eris_OVoo = eris_ovOO = None
eris_ovOV = np.asarray(eris.ovOV)
WoOoO += lib.einsum('iJeF,meNF->mNiJ', tauab, eris_ovOV)
u2ab += lib.einsum('mNaB,mNiJ->iJaB', tauab, WoOoO)
WoOoO = None
tilab = make_tau_ab(t2[1], t1 , t1 , fac=0.5)
Fvva -= lib.einsum('mNaF,meNF->ae', tilab, eris_ovOV)
Fvvb -= lib.einsum('nMfA,nfME->AE', tilab, eris_ovOV)
Fooa += lib.einsum('iNeF,meNF->mi', tilab, eris_ovOV)
Foob += lib.einsum('nIfE,nfME->MI', tilab, eris_ovOV)
Fova += np.einsum('NF,meNF->me',t1b, eris_ovOV)
Fovb += np.einsum('nf,nfME->ME',t1a, eris_ovOV)
u2ab += eris_ovOV.conj().transpose(0,2,1,3)
wovvo += 0.5*lib.einsum('jNbF,meNF->mbej', t2ab, eris_ovOV)
wOVVO += 0.5*lib.einsum('nJfB,nfME->MBEJ', t2ab, eris_ovOV)
wOvVo -= 0.5*lib.einsum('jnfb,nfME->MbEj', t2aa, eris_ovOV)
woVvO -= 0.5*lib.einsum('JNFB,meNF->mBeJ', t2bb, eris_ovOV)
woVVo += 0.5*lib.einsum('jNfB,mfNE->mBEj', t2ab, eris_ovOV)
wOvvO += 0.5*lib.einsum('nJbF,neMF->MbeJ', t2ab, eris_ovOV)
tmpabab = lib.einsum('JF,meNF->mNeJ', t1b, eris_ovOV)
tmpbaba = lib.einsum('jf,nfME->MnEj', t1a, eris_ovOV)
woVvO -= lib.einsum('NB,mNeJ->mBeJ', t1b, tmpabab)
wOvVo -= lib.einsum('nb,MnEj->MbEj', t1a, tmpbaba)
woVVo += lib.einsum('NB,NmEj->mBEj', t1b, tmpbaba)
wOvvO += lib.einsum('nb,nMeJ->MbeJ', t1a, tmpabab)
tmpabab = tmpbaba = tilab = None
Fova += fova
Fovb += fovb
u1a += fova.conj()
u1a += np.einsum('ie,ae->ia', t1a, Fvva)
u1a -= np.einsum('ma,mi->ia', t1a, Fooa)
u1a -= np.einsum('imea,me->ia', t2aa, Fova)
u1a += np.einsum('iMaE,ME->ia', t2ab, Fovb)
u1b += fovb.conj()
u1b += np.einsum('ie,ae->ia',t1b,Fvvb)
u1b -= np.einsum('ma,mi->ia',t1b,Foob)
u1b -= np.einsum('imea,me->ia', t2bb, Fovb)
u1b += np.einsum('mIeA,me->IA', t2ab, Fova)
eris_oovv = np.asarray(eris.oovv)
eris_ovvo = np.asarray(eris.ovvo)
wovvo -= eris_oovv.transpose(0,2,3,1)
wovvo += eris_ovvo.transpose(0,2,1,3)
oovv = eris_oovv - eris_ovvo.transpose(0,3,2,1)
u1a-= np.einsum('nf,niaf->ia', t1a, oovv)
tmp1aa = lib.einsum('ie,mjbe->mbij', t1a, oovv)
u2aa += 2*lib.einsum('ma,mbij->ijab', t1a, tmp1aa)
eris_ovvo = eris_oovv = oovv = tmp1aa = None
eris_OOVV = np.asarray(eris.OOVV)
eris_OVVO = np.asarray(eris.OVVO)
wOVVO -= eris_OOVV.transpose(0,2,3,1)
wOVVO += eris_OVVO.transpose(0,2,1,3)
OOVV = eris_OOVV - eris_OVVO.transpose(0,3,2,1)
u1b-= np.einsum('nf,niaf->ia', t1b, OOVV)
tmp1bb = lib.einsum('ie,mjbe->mbij', t1b, OOVV)
u2bb += 2*lib.einsum('ma,mbij->ijab', t1b, tmp1bb)
eris_OVVO = eris_OOVV = OOVV = None
eris_ooVV = np.asarray(eris.ooVV)
eris_ovVO = np.asarray(eris.ovVO)
woVVo -= eris_ooVV.transpose(0,2,3,1)
woVvO += eris_ovVO.transpose(0,2,1,3)
u1b+= np.einsum('nf,nfAI->IA', t1a, eris_ovVO)
tmp1ab = lib.einsum('ie,meBJ->mBiJ', t1a, eris_ovVO)
tmp1ab+= lib.einsum('IE,mjBE->mBjI', t1b, eris_ooVV)
u2ab -= lib.einsum('ma,mBiJ->iJaB', t1a, tmp1ab)
    eris_ooVV = eris_ovVO = tmp1ab = None
eris_OOvv = np.asarray(eris.OOvv)
eris_OVvo = np.asarray(eris.OVvo)
wOvvO -= eris_OOvv.transpose(0,2,3,1)
wOvVo += eris_OVvo.transpose(0,2,1,3)
u1a+= np.einsum('NF,NFai->ia', t1b, eris_OVvo)
tmp1ba = lib.einsum('IE,MEbj->MbIj', t1b, eris_OVvo)
tmp1ba+= lib.einsum('ie,MJbe->MbJi', t1a, eris_OOvv)
u2ab -= lib.einsum('MA,MbIj->jIbA', t1b, tmp1ba)
    eris_OOvv = eris_OVvo = tmp1ba = None
u2aa += 2*lib.einsum('imae,mbej->ijab', t2aa, wovvo)
u2aa += 2*lib.einsum('iMaE,MbEj->ijab', t2ab, wOvVo)
u2bb += 2*lib.einsum('imae,mbej->ijab', t2bb, wOVVO)
u2bb += 2*lib.einsum('mIeA,mBeJ->IJAB', t2ab, woVvO)
u2ab += lib.einsum('imae,mBeJ->iJaB', t2aa, woVvO)
u2ab += lib.einsum('iMaE,MBEJ->iJaB', t2ab, wOVVO)
u2ab += lib.einsum('iMeA,MbeJ->iJbA', t2ab, wOvvO)
u2ab += lib.einsum('IMAE,MbEj->jIbA', t2bb, wOvVo)
u2ab += lib.einsum('mIeA,mbej->jIbA', t2ab, wovvo)
u2ab += lib.einsum('mIaE,mBEj->jIaB', t2ab, woVVo)
wovvo = wOVVO = woVvO = wOvVo = woVVo = wOvvO = None
Ftmpa = Fvva - .5*lib.einsum('mb,me->be', t1a, Fova)
Ftmpb = Fvvb - .5*lib.einsum('mb,me->be', t1b, Fovb)
u2aa += lib.einsum('ijae,be->ijab', t2aa, Ftmpa)
u2bb += lib.einsum('ijae,be->ijab', t2bb, Ftmpb)
u2ab += lib.einsum('iJaE,BE->iJaB', t2ab, Ftmpb)
u2ab += lib.einsum('iJeA,be->iJbA', t2ab, Ftmpa)
Ftmpa = Fooa + 0.5*lib.einsum('je,me->mj', t1a, Fova)
Ftmpb = Foob + 0.5*lib.einsum('je,me->mj', t1b, Fovb)
u2aa -= lib.einsum('imab,mj->ijab', t2aa, Ftmpa)
u2bb -= lib.einsum('imab,mj->ijab', t2bb, Ftmpb)
u2ab -= lib.einsum('iMaB,MJ->iJaB', t2ab, Ftmpb)
u2ab -= lib.einsum('mIaB,mj->jIaB', t2ab, Ftmpa)
eris_ovoo = np.asarray(eris.ovoo).conj()
eris_OVOO = np.asarray(eris.OVOO).conj()
eris_OVoo = np.asarray(eris.OVoo).conj()
eris_ovOO = np.asarray(eris.ovOO).conj()
ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
u2aa -= lib.einsum('ma,jbim->ijab', t1a, ovoo)
u2bb -= lib.einsum('ma,jbim->ijab', t1b, OVOO)
u2ab -= lib.einsum('ma,JBim->iJaB', t1a, eris_OVoo)
u2ab -= lib.einsum('MA,ibJM->iJbA', t1b, eris_ovOO)
eris_ovoo = eris_OVoo = eris_OVOO = eris_ovOO = None
u2aa *= .5
u2bb *= .5
u2aa = u2aa - u2aa.transpose(0,1,3,2)
u2aa = u2aa - u2aa.transpose(1,0,2,3)
u2bb = u2bb - u2bb.transpose(0,1,3,2)
u2bb = u2bb - u2bb.transpose(1,0,2,3)
eia_a = lib.direct_sum('i-a->ia', mo_ea_o, mo_ea_v)
eia_b = lib.direct_sum('i-a->ia', mo_eb_o, mo_eb_v)
u1a /= eia_a
u1b /= eia_b
u2aa /= lib.direct_sum('ia+jb->ijab', eia_a, eia_a)
u2ab /= lib.direct_sum('ia+jb->ijab', eia_a, eia_b)
u2bb /= lib.direct_sum('ia+jb->ijab', eia_b, eia_b)
time0 = log.timer_debug1('update t1 t2', *time0)
t1new = u1a, u1b
t2new = u2aa, u2ab, u2bb
return t1new, t2new
def energy(cc, t1=None, t2=None, eris=None):
'''UCCSD correlation energy'''
if t1 is None: t1 = cc.t1
if t2 is None: t2 = cc.t2
if eris is None: eris = cc.ao2mo()
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
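    # E_corr = f.t1 + (1/4) <ij||ab> (t2 + t1 t1) for the aa and bb spin blocks,
    # plus the unantisymmetrized mixed-spin (ab) contraction below.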
e = np.einsum('ia,ia', fova, t1a)
e += np.einsum('ia,ia', fovb, t1b)
e += 0.25*np.einsum('ijab,iajb',t2aa,eris_ovov)
e -= 0.25*np.einsum('ijab,ibja',t2aa,eris_ovov)
e += 0.25*np.einsum('ijab,iajb',t2bb,eris_OVOV)
e -= 0.25*np.einsum('ijab,ibja',t2bb,eris_OVOV)
e += np.einsum('iJaB,iaJB',t2ab,eris_ovOV)
e += 0.5*np.einsum('ia,jb,iajb',t1a,t1a,eris_ovov)
e -= 0.5*np.einsum('ia,jb,ibja',t1a,t1a,eris_ovov)
    e += 0.5*np.einsum('ia,jb,iajb',t1b,t1b,eris_OVOV)
# -*- coding: utf-8 -*-
"""
Showcases *ICTCP* *colour encoding* computations.
"""
import numpy as np
import colour
from colour.utilities import message_box
message_box('"ICTCP" Colour Encoding Computations')
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
message_box(('Converting from "ITU-R BT.2020" colourspace to "ICTCP" colour '
'encoding given "RGB" values:\n'
'\n\t{0}'.format(RGB)))
print(colour.RGB_to_ICTCP(RGB))
print('\n')
ICTCP = np.array([0.07351364, 0.00475253, 0.09351596])
import math
from typing import List, Tuple
import cv2
import numpy as np
from pedrec.models.constants.skeleton_pedrec import SKELETON_PEDREC_IDS
from pedrec.models.data_structures import ImageSize
from pedrec.models.human import Human
from pedrec.tracking.color_tracking import get_color_similarity
from pedrec.tracking.pose_tracking import get_skeleton_diameter, get_pose_similarity
from pedrec.utils.bb_helper import bb_iou_numpy, get_coord_bb_from_center_bb, get_bb_width, get_bb_height
from pedrec.utils.skeleton_helper import get_euclidean_joint_distances
def get_bb_color(human, img):
human_bb = get_coord_bb_from_center_bb(human.bb)
area_min_x = int(max(0, human_bb[0])) # head area = 10x10px
area_max_x = int(min(img.shape[1], human_bb[2])) # head area = 10x10px
area_min_y = int(max(0, human_bb[1])) # head area = 10x10px
area_max_y = int(min(img.shape[0], human_bb[3])) # head area = 10x10px
bb_area = img[area_min_y:area_max_y, area_min_x:area_max_x]
    return np.mean(bb_area, axis=(0, 1))
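
# A minimal usage sketch (hypothetical frame/tracker objects):
#   img = cv2.imread("frame.png")        # BGR frame
#   mean_bgr = get_bb_color(human, img)  # per-channel mean over the clipped box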
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 10:00:33 2018
@author: jdkern
"""
from __future__ import division
from sklearn import linear_model
from statsmodels.tsa.api import VAR
import scipy.stats as st
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
######################################################################
# LOAD
######################################################################
#import data
df_load = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='hourly_load',header=0)
df_weather = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='weather',header=0)
BPA_weights = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='BPA_location_weights',header=0)
CAISO_weights = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='CAISO_location_weights',header=0)
Name_list=pd.read_csv('Synthetic_demand_pathflows/Covariance_Calculation.csv')
Name_list=list(Name_list.loc['SALEM_T':])
Name_list=Name_list[1:]
df_wind=pd.read_csv('Synthetic_wind_power/wind_power_sim.csv',header=0)
sim_years = int(len(df_wind)/8760) + 3
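# the "+ 3" pads the horizon: the first simulated year and the last two are trimmed below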
sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0,index_col=0)
sim_weather = sim_weather.iloc[0:365*sim_years,:]
sim_weather = sim_weather.iloc[365:len(sim_weather)-730,:]
sim_weather = sim_weather.reset_index(drop=True)
#weekday designation
dow = df_weather.loc[:,'Weekday']
#generate simulated day of the week assuming starts from monday
count=0
sim_dow= np.zeros(len(sim_weather))
for i in range(0,len(sim_weather)):
count = count +1
if count <=5:
sim_dow[i]=1
elif count > 5:
sim_dow[i]=0
if count ==7:
count =0
#Generate a datelist
datelist=pd.date_range(pd.Timestamp(2017,1,1),periods=365).tolist()
sim_month=np.zeros(len(sim_weather))
sim_day=np.zeros(len(sim_weather))
sim_year=np.zeros(len(sim_weather))
count=0
for i in range(0,len(sim_weather)):
if count <=364:
sim_month[i]=datelist[count].month
sim_day[i]=datelist[count].day
sim_year[i]=datelist[count].year
else:
count=0
sim_month[i]=datelist[count].month
sim_day[i]=datelist[count].day
sim_year[i]=datelist[count].year
count=count+1
######################################################################
# BPAT
######################################################################
#Find the simulated data at the sites
col_BPA_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T']
col_BPA_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W']
BPA_sim_T=sim_weather[col_BPA_T].values
BPA_sim_W=sim_weather[col_BPA_W].values
sim_days = len(sim_weather)
weighted_SimT = np.zeros((sim_days,1))
###########################################
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise']
num_cities = len(cities)
num_days = len(df_weather)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
weighted_AvgT = np.zeros((num_days,1))
for i in cities:
n1 = i + '_MaxT'
n2 = i + '_MinT'
n3 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = 0.5*df_weather.loc[:,n1] + 0.5*df_weather.loc[:,n2]
weighted_AvgT[:,0] = weighted_AvgT[:,0] + AvgT[:,j]*BPA_weights.loc[0,i]
Wind[:,j] = df_weather.loc[:,n3]
weighted_SimT[:,0] = weighted_SimT[:,0] + BPA_sim_T[:,j]*BPA_weights.loc[0,i]
#Convert simulated temperature to F
weighted_SimT=(weighted_SimT * 9/5) +32
BPA_sim_T_F=(BPA_sim_T * 9/5) +32
#convert to heating/cooling degree days about a 65F base: HDD = max(0, 65-T), CDD = max(0, T-65)
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
HDD_sim = np.zeros((sim_days,num_cities))
CDD_sim = np.zeros((sim_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
for i in range(0,sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-BPA_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,BPA_sim_T_F[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(BPA_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(BPA_sim_W,binary_HDD_sim)
#convert load to array
BPA_load = df_load.loc[:,'BPA'].values
#remove NaNs
a = np.argwhere(np.isnan(BPA_load))
for i in a:
BPA_load[i] = BPA_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(BPA_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
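# Bin days by weighted average temperature; a separate peak-load regression is fit per bin.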
X70p = M[(M[:,0] >= 70),2:]
y70p = M[(M[:,0] >= 70),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X40_50 = M[(M[:,0] >= 40) & (M[:,0] < 50),2:]
y40_50 = M[(M[:,0] >= 40) & (M[:,0] < 50),1]
X30_40 = M[(M[:,0] >= 30) & (M[:,0] < 40),2:]
y30_40 = M[(M[:,0] >= 30) & (M[:,0] < 40),1]
X25_30 = M[(M[:,0] >= 25) & (M[:,0] < 30),2:]
y25_30 = M[(M[:,0] >= 25) & (M[:,0] < 30),1]
X25m = M[(M[:,0] < 25),2:]
y25m = M[(M[:,0] < 25),1]
X70p_Sim = M_sim[(M_sim[:,0] >= 70),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X40_50_Sim = M_sim[(M_sim[:,0] >= 40) & (M_sim[:,0] < 50),1:]
X30_40_Sim = M_sim[(M_sim[:,0] >= 30) & (M_sim[:,0] < 40),1:]
X25_30_Sim = M_sim[(M_sim[:,0] >= 25) & (M_sim[:,0] < 30),1:]
X25m_Sim = M_sim[(M_sim[:,0] < 25),1:]
#multivariate regression
#Create linear regression object
reg70p = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg40_50 = linear_model.LinearRegression()
reg30_40 = linear_model.LinearRegression()
reg25_30 = linear_model.LinearRegression()
reg25m = linear_model.LinearRegression()
# Train the model using the training sets
if len(y70p) > 0:
reg70p.fit(X70p,y70p)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y40_50) > 0:
reg40_50.fit(X40_50,y40_50)
if len(y30_40) > 0:
reg30_40.fit(X30_40,y30_40)
if len(y25_30) > 0:
reg25_30.fit(X25_30,y25_30)
if len(y25m) > 0:
reg25m.fit(X25m,y25m)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=70:
y_hat = reg70p.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] >= 40 and M[i,0] < 50:
y_hat = reg40_50.predict(s)
elif M[i,0] >= 30 and M[i,0] < 40:
y_hat = reg30_40.predict(s)
elif M[i,0] >= 25 and M[i,0] < 30:
y_hat = reg25_30.predict(s)
elif M[i,0] < 25:
y_hat = reg25m.predict(s)
predicted = np.append(predicted,y_hat)
BPA_p = predicted.reshape((len(predicted),1))
#Simulate using the regression above
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=70:
y_hat = reg70p.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] >= 40 and M_sim[i,0] < 50:
y_hat = reg40_50.predict(s)
elif M_sim[i,0] >= 30 and M_sim[i,0] < 40:
y_hat = reg30_40.predict(s)
elif M_sim[i,0] >= 25 and M_sim[i,0] < 30:
y_hat = reg25_30.predict(s)
elif M_sim[i,0] < 25:
y_hat = reg25m.predict(s)
simulated = np.append(simulated,y_hat)
BPA_sim = simulated.reshape((len(simulated),1))
a=st.pearsonr(peaks,BPA_p)
print(a[0]**2, a[1])
# Residuals
BPAresiduals = BPA_p - peaks
BPA_y = peaks
# RMSE
RMSE = (np.sum((BPAresiduals**2))/len(BPAresiduals))**.5
output = np.column_stack((BPA_p,peaks))
#########################################################################
# CAISO
#########################################################################
#Find the simulated data at the sites
col_CAISO_T = ['FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_CAISO_W = ['FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
CAISO_sim_T=sim_weather[col_CAISO_T].values
CAISO_sim_W=sim_weather[col_CAISO_W].values
sim_days = len(sim_weather)
weighted_SimT = np.zeros((sim_days,1))
#find average temps
cities = ['Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_weather)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
weighted_AvgT = np.zeros((num_days,1))
for i in cities:
n1 = i + '_MaxT'
n2 = i + '_MinT'
n3 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = 0.5*df_weather.loc[:,n1] + 0.5*df_weather.loc[:,n2]
Wind[:,j] = df_weather.loc[:,n3]
weighted_AvgT[:,0] = weighted_AvgT[:,0] + AvgT[:,j]*CAISO_weights.loc[1,i]
weighted_SimT[:,0] = weighted_SimT[:,0] + CAISO_sim_T[:,j]*CAISO_weights.loc[1,i]
#Convert simulated temperature to F
weighted_SimT=(weighted_SimT * 9/5) +32
CAISO_sim_T_F=(CAISO_sim_T * 9/5) +32
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
HDD_sim = np.zeros((sim_days,num_cities))
CDD_sim = np.zeros((sim_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
for i in range(0,sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-CAISO_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,CAISO_sim_T_F[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
CDD_wind_sim = np.multiply(CAISO_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(CAISO_sim_W,binary_HDD_sim)
###########################
# CAISO - SDGE
###########################
#convert load to array
SDGE_load = df_load.loc[:,'SDGE'].values
#remove NaNs
a = np.argwhere(np.isnan(SDGE_load))
for i in a:
SDGE_load[i] = SDGE_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(SDGE_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
SDGE_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
SDGE_sim = simulated.reshape((len(simulated),1))
# Residuals
SDGEresiduals = SDGE_p - peaks
SDGE_y = peaks
#a=st.pearsonr(peaks,SDGE_p)
#print a[0]**2
# RMSE
RMSE = (np.sum((SDGEresiduals**2))/len(SDGEresiduals))**.5
###########################
# CAISO - SCE
###########################
#convert load to array
SCE_load = df_load.loc[:,'SCE'].values
#remove NaNs
a = np.argwhere(np.isnan(SCE_load))
for i in a:
SCE_load[i] = SCE_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(SCE_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
##multivariate regression
#
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
SCE_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
SCE_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,SCE_p)
#print a[0]**2
# Residuals
SCEresiduals = SCE_p - peaks
SCE_y = peaks
# RMSE
RMSE = (np.sum((SCEresiduals**2))/len(SCEresiduals))**.5
###########################
# CAISO - PG&E Valley
###########################
#convert load to array
PGEV_load = df_load.loc[:,'PGE_V'].values
#remove NaNs
a = np.argwhere(np.isnan(PGEV_load))
for i in a:
PGEV_load[i] = PGEV_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(PGEV_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
##multivariate regression
#
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
PGEV_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
PGEV_sim = simulated.reshape((len(simulated),1))
a=st.pearsonr(peaks,PGEV_p)
print(a[0]**2, a[1])
# Residuals
PGEVresiduals = PGEV_p - peaks
PGEV_y = peaks
# RMSE
RMSE = (np.sum((PGEVresiduals**2))/len(PGEVresiduals))**.5
###########################
# CAISO - PG&E Bay
###########################
#convert load to array
PGEB_load = df_load.loc[:,'PGE_B'].values
#remove NaNs
a = np.argwhere(np.isnan(PGEB_load))
for i in a:
PGEB_load[i] = PGEB_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(PGEB_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
PGEB_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
        y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
PGEB_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,PGEB_p)
#print a[0]**2
# Residuals
PGEBresiduals = PGEB_p - peaks
PGEB_y = peaks
# RMSE
RMSE = (np.sum((PGEBresiduals**2))/len(PGEBresiduals))**.5
#Collect residuals from load regression
R = np.column_stack((BPAresiduals,SDGEresiduals,SCEresiduals,PGEVresiduals,PGEBresiduals))
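# retain the first three years (3*365 = 1095 days) of daily residuals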
ResidualsLoad = R[0:3*365,:]
###################################
# PATH 46
###################################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/46_daily.xlsx',sheet_name='Sheet1',header=0)
#find average temps
cities = ['Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Path66'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
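# combine the calendar/flow columns with the weather-derived degree-day features into one design matrix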
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path46'}, inplace=True)
df_data.rename(columns={4:'Weekday'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
y = df_data.loc[:,'Path46']
#multivariate regression
jan_reg_46 = linear_model.LinearRegression()
feb_reg_46 = linear_model.LinearRegression()
mar_reg_46 = linear_model.LinearRegression()
apr_reg_46 = linear_model.LinearRegression()
may_reg_46 = linear_model.LinearRegression()
jun_reg_46 = linear_model.LinearRegression()
jul_reg_46 = linear_model.LinearRegression()
aug_reg_46 = linear_model.LinearRegression()
sep_reg_46 = linear_model.LinearRegression()
oct_reg_46 = linear_model.LinearRegression()
nov_reg_46 = linear_model.LinearRegression()
dec_reg_46 = linear_model.LinearRegression()
# Train the model using the training sets
jan_reg_46.fit(jan.loc[:,'Weekday':],jan.loc[:,'Path46'])
feb_reg_46.fit(feb.loc[:,'Weekday':],feb.loc[:,'Path46'])
mar_reg_46.fit(mar.loc[:,'Weekday':],mar.loc[:,'Path46'])
apr_reg_46.fit(apr.loc[:,'Weekday':],apr.loc[:,'Path46'])
may_reg_46.fit(may.loc[:,'Weekday':],may.loc[:,'Path46'])
jun_reg_46.fit(jun.loc[:,'Weekday':],jun.loc[:,'Path46'])
jul_reg_46.fit(jul.loc[:,'Weekday':],jul.loc[:,'Path46'])
aug_reg_46.fit(aug.loc[:,'Weekday':],aug.loc[:,'Path46'])
sep_reg_46.fit(sep.loc[:,'Weekday':],sep.loc[:,'Path46'])
oct_reg_46.fit(oct.loc[:,'Weekday':],oct.loc[:,'Path46'])
nov_reg_46.fit(nov.loc[:,'Weekday':],nov.loc[:,'Path46'])
dec_reg_46.fit(dec.loc[:,'Weekday':],dec.loc[:,'Path46'])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'Weekday':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jan_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = feb_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = mar_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = apr_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = may_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jun_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jul_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = aug_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = sep_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = oct_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = nov_reg_46.predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = dec_reg_46.predict(s)
predicted = np.append(predicted,p)
Path46_p = predicted
# Residuals
residuals = predicted - y.values
Residuals46 = np.reshape(residuals[730:],(1095,1))
Path46_y = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
##R2
#a=st.pearsonr(y,predicted)
#print a[0]**2
###############################
# NW PATHS
###############################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/NW_Path_data.xlsx',sheet_name='Daily',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Weekday'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
H = df_data
#df_data.to_excel('Synthetic_demand_pathflows/cX.xlsx')
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path8'}, inplace=True)
df_data.rename(columns={4:'Path14'}, inplace=True)
df_data.rename(columns={5:'Path3'}, inplace=True)
df_data.rename(columns={6:'BPA_wind'}, inplace=True)
df_data.rename(columns={7:'BPA_hydro'}, inplace=True)
df_data.rename(columns={8:'Weekday'}, inplace=True)
df_data.rename(columns={9:'Salem_HDD'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
lines = ['Path8','Path14','Path3']
num_lines = len(lines)
export_residuals = np.zeros((len(cX),num_lines))
NWPaths_p= np.zeros((len(cX),num_lines))
NWPaths_y = np.zeros((len(cX),num_lines))
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
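    # One regression per (month, path) pair, created dynamically via locals();
    # a dict keyed by (month, line) would be the more idiomatic structure.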
name='jan_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='feb_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='mar_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='apr_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='may_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='jun_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='jul_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='aug_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='sep_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='oct_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='nov_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
name='dec_reg_NW' + str(line)
locals()[name] = linear_model.LinearRegression()
# Train the model using the training sets
name='jan_reg_NW' + str(line)
locals()[name].fit(jan.loc[:,'BPA_wind':],jan.loc[:,line])
name='feb_reg_NW' + str(line)
locals()[name].fit(feb.loc[:,'BPA_wind':],feb.loc[:,line])
name='mar_reg_NW' + str(line)
locals()[name].fit(mar.loc[:,'BPA_wind':],mar.loc[:,line])
name='apr_reg_NW' + str(line)
locals()[name].fit(apr.loc[:,'BPA_wind':],apr.loc[:,line])
name='may_reg_NW' + str(line)
locals()[name].fit(may.loc[:,'BPA_wind':],may.loc[:,line])
name='jun_reg_NW' + str(line)
locals()[name].fit(jun.loc[:,'BPA_wind':],jun.loc[:,line])
name='jul_reg_NW' + str(line)
locals()[name].fit(jul.loc[:,'BPA_wind':],jul.loc[:,line])
name='aug_reg_NW' + str(line)
locals()[name].fit(aug.loc[:,'BPA_wind':],aug.loc[:,line])
name='sep_reg_NW' + str(line)
locals()[name].fit(sep.loc[:,'BPA_wind':],sep.loc[:,line])
name='oct_reg_NW' + str(line)
locals()[name].fit(oct.loc[:,'BPA_wind':],oct.loc[:,line])
name='nov_reg_NW' + str(line)
locals()[name].fit(nov.loc[:,'BPA_wind':],nov.loc[:,line])
name='dec_reg_NW' + str(line)
locals()[name].fit(dec.loc[:,'BPA_wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'BPA_wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jan_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='feb_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='mar_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='apr_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='may_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jun_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='jul_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='aug_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='sep_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='oct_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='nov_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
name='dec_reg_NW' + str(line)
p = locals()[name].predict(s)
predicted = np.append(predicted,p)
NWPaths_p[:,line_index] = predicted
# Residuals
residuals = predicted - y.values
export_residuals[:,line_index] = residuals
NWPaths_y[:,line_index] = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
# #R2
# a=st.pearsonr(y,predicted)
# print a[0]**2
ResidualsNWPaths = export_residuals
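# A locals()-free alternative for the per-month regressions above (an
# illustrative sketch; the helper name and signature are not part of the
# original script). It keeps the fitted models in a dict keyed by
# (month, line) and reproduces the same fits as the twelve named regressions.
def fit_monthly_models(frame, lines, first_col='BPA_wind'):
    models = {}
    for line in lines:
        for m, g in frame.groupby('Month'):
            reg = linear_model.LinearRegression()
            reg.fit(g.loc[:, first_col:], g.loc[:, line])
            models[(m, line)] = reg
    return models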
###############################
# Other CA PATHS
###############################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/OtherCA_Path_data.xlsx',sheet_name='Daily',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran'] # 'Tuscon' kept as-is to match the spreadsheet's column headers
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
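# Equivalent one-liners for the loop above (sketch):
# AvgT = df_data1[[c + '_AvgT' for c in cities]].values
# Wind = df_data1[[c + '_Wind' for c in cities]].values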
#convert to degree days
HDD = np.maximum(0, 65 - AvgT)
CDD = np.maximum(0, AvgT - 65)
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Path66'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path61'}, inplace=True)
df_data.rename(columns={4:'Path42'}, inplace=True)
df_data.rename(columns={5:'Path24'}, inplace=True)
df_data.rename(columns={6:'Path45'}, inplace=True)
df_data.rename(columns={7:'BPA_wind'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
lines = ['Path61','Path42','Path24','Path45']
num_lines = len(lines)
export_residuals = np.zeros((len(cX),num_lines))
OtherCA_Paths_p= np.zeros((len(cX),num_lines))
OtherCA_Paths_y = np.zeros((len(cX),num_lines))
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
name_1='jan_reg_CA' + str(line)
name_2='feb_reg_CA' + str(line)
name_3='mar_reg_CA' + str(line)
name_4='apr_reg_CA' + str(line)
name_5='may_reg_CA' + str(line)
name_6='jun_reg_CA' + str(line)
name_7='jul_reg_CA' + str(line)
name_8='aug_reg_CA' + str(line)
name_9='sep_reg_CA' + str(line)
name_10='oct_reg_CA' + str(line)
name_11='nov_reg_CA' + str(line)
name_12='dec_reg_CA' + str(line)
locals()[name_1] = linear_model.LinearRegression()
locals()[name_2] = linear_model.LinearRegression()
locals()[name_3] = linear_model.LinearRegression()
locals()[name_4] = linear_model.LinearRegression()
locals()[name_5] = linear_model.LinearRegression()
locals()[name_6] = linear_model.LinearRegression()
locals()[name_7] = linear_model.LinearRegression()
locals()[name_8] = linear_model.LinearRegression()
locals()[name_9] = linear_model.LinearRegression()
locals()[name_10] = linear_model.LinearRegression()
locals()[name_11] = linear_model.LinearRegression()
locals()[name_12] = linear_model.LinearRegression()
# Train the model using the training sets
locals()[name_1].fit(jan.loc[:,'BPA_wind':],jan.loc[:,line])
locals()[name_2].fit(feb.loc[:,'BPA_wind':],feb.loc[:,line])
locals()[name_3].fit(mar.loc[:,'BPA_wind':],mar.loc[:,line])
locals()[name_4].fit(apr.loc[:,'BPA_wind':],apr.loc[:,line])
locals()[name_5].fit(may.loc[:,'BPA_wind':],may.loc[:,line])
locals()[name_6].fit(jun.loc[:,'BPA_wind':],jun.loc[:,line])
locals()[name_7].fit(jul.loc[:,'BPA_wind':],jul.loc[:,line])
locals()[name_8].fit(aug.loc[:,'BPA_wind':],aug.loc[:,line])
locals()[name_9].fit(sep.loc[:,'BPA_wind':],sep.loc[:,line])
locals()[name_10].fit(oct.loc[:,'BPA_wind':],oct.loc[:,line])
locals()[name_11].fit(nov.loc[:,'BPA_wind':],nov.loc[:,line])
locals()[name_12].fit(dec.loc[:,'BPA_wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'BPA_wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
OtherCA_Paths_p[:,line_index] = predicted
# Residuals
residuals = predicted - y.values
export_residuals[:,line_index] = residuals
OtherCA_Paths_y[:,line_index] = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
# #R2
# a=st.pearsonr(y,predicted)
# print a[0]**2
ResidualsOtherCA_Paths = export_residuals
##########################
# PATH 65 & 66
##########################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/Path65_66_regression_data.xlsx',sheet_name='Sheet1',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.maximum(0, 65 - AvgT)
CDD = np.maximum(0, AvgT - 65)
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Weekday'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path65'}, inplace=True)
df_data.rename(columns={4:'Path66'}, inplace=True)
df_data.rename(columns={5:'Wind'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
lines = ['Path65','Path66']
num_lines = len(lines)
export_residuals = np.zeros((len(cX),num_lines))
Path65_66_p = np.zeros((len(cX),num_lines))
Path65_66_y = np.zeros((len(cX),num_lines))
for line in lines:
y = df_data.loc[:,line]
line_index = lines.index(line)
#multivariate regression
name_1='jan_reg_6566' + str(line)
name_2='feb_reg_6566' + str(line)
name_3='mar_reg_6566' + str(line)
name_4='apr_reg_6566' + str(line)
name_5='may_reg_6566' + str(line)
name_6='jun_reg_6566' + str(line)
name_7='jul_reg_6566' + str(line)
name_8='aug_reg_6566' + str(line)
name_9='sep_reg_6566' + str(line)
name_10='oct_reg_6566' + str(line)
name_11='nov_reg_6566' + str(line)
name_12='dec_reg_6566' + str(line)
locals()[name_1] = linear_model.LinearRegression()
locals()[name_2] = linear_model.LinearRegression()
locals()[name_3] = linear_model.LinearRegression()
locals()[name_4] = linear_model.LinearRegression()
locals()[name_5] = linear_model.LinearRegression()
locals()[name_6] = linear_model.LinearRegression()
locals()[name_7] = linear_model.LinearRegression()
locals()[name_8] = linear_model.LinearRegression()
locals()[name_9] = linear_model.LinearRegression()
locals()[name_10] = linear_model.LinearRegression()
locals()[name_11] = linear_model.LinearRegression()
locals()[name_12] = linear_model.LinearRegression()
# Train the model using the training sets
locals()[name_1].fit(jan.loc[:,'Wind':],jan.loc[:,line])
locals()[name_2].fit(feb.loc[:,'Wind':],feb.loc[:,line])
locals()[name_3].fit(mar.loc[:,'Wind':],mar.loc[:,line])
locals()[name_4].fit(apr.loc[:,'Wind':],apr.loc[:,line])
locals()[name_5].fit(may.loc[:,'Wind':],may.loc[:,line])
locals()[name_6].fit(jun.loc[:,'Wind':],jun.loc[:,line])
locals()[name_7].fit(jul.loc[:,'Wind':],jul.loc[:,line])
locals()[name_8].fit(aug.loc[:,'Wind':],aug.loc[:,line])
locals()[name_9].fit(sep.loc[:,'Wind':],sep.loc[:,line])
locals()[name_10].fit(oct.loc[:,'Wind':],oct.loc[:,line])
locals()[name_11].fit(nov.loc[:,'Wind':],nov.loc[:,line])
locals()[name_12].fit(dec.loc[:,'Wind':],dec.loc[:,line])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'Wind':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
Path65_66_p[:,line_index] = predicted
Path65_66_y[:,line_index] = y.values
# Residuals
residuals = predicted - y.values
export_residuals[:,line_index] = residuals
#
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
#R2
# a=st.pearsonr(y,predicted)
# print a[0]**2
Residuals65_66 = export_residuals[730:,:]
#####################################################################
# Residual Analysis
#####################################################################
R = np.column_stack((ResidualsLoad,ResidualsNWPaths,ResidualsOtherCA_Paths,Residuals46,Residuals65_66))
rc = np.shape(R)
cols = rc[1]
#whiten residuals
mus = np.mean(R, axis=0).reshape(-1, 1)
stds = np.std(R, axis=0).reshape(-1, 1)
R_w = (R - mus.T) / stds.T
sim_days = len(R_w)
#Vector autoregressive model on residuals
model = VAR(R_w)
results = model.fit(1)
sim_residuals = np.zeros((sim_days,cols))
errors = np.zeros((sim_days,cols))
p = results.params
y_seeds = R_w[-1]
C = results.sigma_u
means = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
E = np.random.multivariate_normal(means,C,sim_days)
ys = np.zeros((cols,1))
# Generate cross correlated residuals
for i in range(0,sim_days):
for j in range(1,cols+1):
name='y' + str(j)
        # VAR(1) update: intercept + lag-1 cross terms + correlated innovation
        locals()[name] = p[0,j-1] + np.dot(p[1:,j-1], y_seeds) + E[i,j-1]
for j in range(1,cols+1):
name='y' + str(j)
y_seeds[j-1]=locals()[name]
sim_residuals[i,:] = [y1,y2,y3,y4,y5,y6,y7,y8,y9,y10,y11,y12,y13,y14,y15]
for i in range(0,cols):
sim_residuals[:,i] = sim_residuals[:,i]*stds[i]*(1/np.std(sim_residuals[:,i])) + mus[i]
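# Compact matrix form of the VAR(1) recursion above, as an illustrative
# helper (the name and signature are not part of the original script):
# y_t = c + A.dot(y_{t-1}) + e_t, where c holds the fitted intercepts and A
# the lag-1 coefficient matrix from the statsmodels results.
def simulate_var1(results, y0, draws):
    c = results.params[0, :]
    A = results.params[1:, :].T
    out = np.zeros_like(draws)
    prev = np.asarray(y0, dtype=float)
    for t in range(len(draws)):
        prev = c + np.dot(A, prev) + draws[t]
        out[t] = prev
    return out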
#validation
Y = np.column_stack((np.reshape(BPA_y[0:3*365],(1095,1)),np.reshape(SDGE_y[0:3*365],(1095,1)),np.reshape(SCE_y[0:3*365],(1095,1)),np.reshape(PGEV_y[0:3*365],(1095,1)),np.reshape(PGEB_y[0:3*365],(1095,1)),NWPaths_y,OtherCA_Paths_y,np.reshape(Path46_y[730:],(1095,1)),np.reshape(Path65_66_y[730:,:],(1095,2))))
combined_BPA = np.reshape(sim_residuals[:,0],(1095,1)) + np.reshape(BPA_p[0:3*365],(1095,1))
combined_SDGE = np.reshape(sim_residuals[:,1],(1095,1)) + np.reshape(SDGE_p[0:3*365],(1095,1))
combined_SCE = np.reshape(sim_residuals[:,2],(1095,1)) + np.reshape(SCE_p[0:3*365],(1095,1))
combined_PGEV = np.reshape(sim_residuals[:,3],(1095,1)) + np.reshape(PGEV_p[0:3*365],(1095,1))
combined_PGEB = np.reshape(sim_residuals[:,4],(1095,1)) + np.reshape(PGEB_p[0:3*365],(1095,1))
combined_Path8 = np.reshape(sim_residuals[:,5],(1095,1)) + np.reshape(NWPaths_p[:,0],(1095,1))
combined_Path14 = np.reshape(sim_residuals[:,6],(1095,1)) + np.reshape(NWPaths_p[:,1],(1095,1))
combined_Path3 = np.reshape(sim_residuals[:,7],(1095,1)) + np.reshape(NWPaths_p[:,2],(1095,1))
combined_Path61 = np.reshape(sim_residuals[:,8],(1095,1)) + np.reshape(OtherCA_Paths_p[:,0],(1095,1))
combined_Path42 = np.reshape(sim_residuals[:,9],(1095,1)) + np.reshape(OtherCA_Paths_p[:,1],(1095,1))
combined_Path24 = np.reshape(sim_residuals[:,10],(1095,1)) + np.reshape(OtherCA_Paths_p[:,2],(1095,1))
combined_Path45 = np.reshape(sim_residuals[:,11],(1095,1)) + np.reshape(OtherCA_Paths_p[:,3],(1095,1))
combined_Path46 = np.reshape(sim_residuals[:,12],(1095,1)) + np.reshape(Path46_p[730:],(1095,1))
combined_Path65 = np.reshape(sim_residuals[:,13],(1095,1)) + np.reshape(Path65_66_p[730:,0],(1095,1))
combined_Path66 = np.reshape(sim_residuals[:,14],(1095,1)) + np.reshape(Path65_66_p[730:,1],(1095,1))
combined = np.column_stack((combined_BPA,combined_SDGE,combined_SCE,combined_PGEV,combined_PGEB,combined_Path8,combined_Path14,combined_Path3,combined_Path61,combined_Path42,combined_Path24,combined_Path45,combined_Path46,combined_Path65,combined_Path66))
rc = np.shape(Y)
cols = rc[1]
names = ['BPA','SDGE','SCE','PGEV','PGEB','Path8','Path14','Path3','Path61','Path42','Path24','Path45','Path46','Path65','Path66']
#for n in names:
#
# n_index = names.index(n)
#
# plt.figure()
# plt.plot(combined[:,n_index],'r')
# plt.plot(Y[:,n_index],'b')
# plt.title(n)
#
##########################################################################################################################################################
#Simulating demand and path
#########################################################################################################################################################
#Sim Residual
simulation_length=len(sim_weather)
syn_residuals = np.zeros((simulation_length,cols))
errors = np.zeros((simulation_length,cols))
y_seeds = R_w[-1]
C = results.sigma_u
means = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
E = np.random.multivariate_normal(means,C,simulation_length)
ys = np.zeros((cols,1))
for i in range(0,simulation_length):
for n in range(0,cols):
ys[n] = p[0,n]
for m in range(0,cols):
            ys[n] = ys[n] + p[m+1,n]*y_seeds[m]
ys[n] = ys[n] + E[i,n]
for n in range(0,cols):
y_seeds[n] = ys[n]
syn_residuals[i,:] = np.reshape([ys],(1,cols))
for i in range(0,cols):
syn_residuals[:,i] = syn_residuals[:,i]*stds[i]*(1/np.std(syn_residuals[:,i])) + mus[i]
##################################################
# PATH NW
##################################################
#This only uses BPA wind and hydro
col_nw_T =['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_nw_W =['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W','TUCSON_W','PHOENIX_W','LAS VEGAS_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_nw_T)
NW_sim_T=sim_weather[col_nw_T].values
NW_sim_W=sim_weather[col_nw_W].values
NW_sim_T_F=(NW_sim_T * 9/5) +32
NW_sim_W =NW_sim_W *2.23694
HDD_sim = np.maximum(0, 65 - NW_sim_T_F)
CDD_sim = np.maximum(0, NW_sim_T_F - 65)
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(NW_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(NW_sim_W,binary_HDD_sim)
#Columns needed: Month, Day, Year, Path8, Path14, Path3, BPA_wind, BPA_hydro, Weekday
sim_BPA_hydro = pd.read_csv('PNW_hydro/FCRPS/Path_dams.csv',header=None)
sim_BPA_hydro=sim_BPA_hydro.values
sim_BPA_hydro=np.sum(sim_BPA_hydro,axis=1)/24
#Trim every series to the common simulation length (whole years)
effect_sim_year=int(len(sim_BPA_hydro)/365)
sim_month=sim_month[:len(sim_BPA_hydro)]
sim_day=sim_day[:len(sim_BPA_hydro)]
sim_year=sim_year[:len(sim_BPA_hydro)]
sim_dow= sim_dow[:len(sim_BPA_hydro)]
sim_wind_power=pd.read_csv('Synthetic_wind_power/wind_power_sim.csv',header=0)
sim_BPA_wind_power= sim_wind_power.loc[:,'BPA']/24
sim_wind_daily = np.zeros((effect_sim_year*365,1))
for i in range(0,effect_sim_year*365):
    # .iloc is end-exclusive, so each day sums exactly 24 hourly values
    sim_wind_daily[i] = np.sum(sim_BPA_wind_power.iloc[i*24:(i+1)*24])
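# Vectorized equivalent (sketch): fold hours into days and sum.
# sim_wind_daily = sim_BPA_wind_power.values[:effect_sim_year*365*24]\
#     .reshape(-1, 24).sum(axis=1).reshape(-1, 1)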
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),sim_wind_daily,sim_BPA_hydro,sim_dow))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path8'}, inplace=True)
df_data_sim.rename(columns={4:'Path14'}, inplace=True)
df_data_sim.rename(columns={5:'Path3'}, inplace=True)
df_data_sim.rename(columns={6:'BPA_wind'}, inplace=True)
df_data_sim.rename(columns={7:'BPA_hydro'}, inplace=True)
df_data_sim.rename(columns={8:'Weekday'}, inplace=True)
df_data_sim.rename(columns={9:'Salem_HDD'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
lines = ['Path8','Path14','Path3']
upper = [1900,1500,1900]
lower = [-600,-900,-2200]
for line in lines:
name='predicted_' + str(line)
locals()[name]=[]
for line in lines:
predicted=[]
rc = np.shape(jan2.loc[:,'BPA_wind':])
n = rc[1]
y = df_data_sim.loc[:,line]
line_index = lines.index(line)
#regression names
name_1='jan_reg_NW' + str(line)
name_2='feb_reg_NW' + str(line)
name_3='mar_reg_NW' + str(line)
name_4='apr_reg_NW' + str(line)
name_5='may_reg_NW' + str(line)
name_6='jun_reg_NW' + str(line)
name_7='jul_reg_NW' + str(line)
name_8='aug_reg_NW' + str(line)
name_9='sep_reg_NW' + str(line)
name_10='oct_reg_NW' + str(line)
name_11='nov_reg_NW' + str(line)
name_12='dec_reg_NW' + str(line)
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
if predicted[i] > upper[line_index]:
predicted[i] = upper[line_index]
elif predicted[i] < lower[line_index]:
predicted[i] = lower[line_index]
name='predicted_' + str(line)
locals()[name]=predicted
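# Equivalent vectorized clamp for the elementwise checks above (sketch):
# predicted = np.clip(predicted, lower[line_index], upper[line_index])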
syn_Path8=predicted_Path8+syn_residuals[:effect_sim_year*365,5]
syn_Path14=predicted_Path14+syn_residuals[:effect_sim_year*365,6]
syn_Path3=predicted_Path3+syn_residuals[:effect_sim_year*365,7]
bias = np.mean(syn_Path8) - np.mean(NWPaths_y[:,0])
syn_Path8 = syn_Path8 - bias
bias = np.mean(syn_Path14) - np.mean(NWPaths_y[:,1])
syn_Path14 = syn_Path14 - bias
bias = np.mean(syn_Path3) - np.mean(NWPaths_y[:,2])
syn_Path3 = syn_Path3 - bias
S = df_data_sim.values
HO = H.values
stats = np.column_stack((S.mean(axis=0), HO.mean(axis=0),
                         S.std(axis=0), HO.std(axis=0)))
################################################################################
###################################################
## PATH 65 & 66
###################################################
col_6566_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_6566_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_6566_T)
P6566_sim_T=sim_weather[col_6566_T].values
P6566_sim_W=sim_weather[col_6566_W].values
P6566_sim_W =P6566_sim_W*2.23694
sim_days = len(sim_weather)
P6566_sim_T_F=(P6566_sim_T * 9/5) +32
HDD_sim = np.maximum(0, 65 - P6566_sim_T_F)
CDD_sim = np.maximum(0, P6566_sim_T_F - 65)
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(P6566_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(P6566_sim_W,binary_HDD_sim)
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),sim_wind_daily,sim_BPA_hydro,syn_Path3,syn_Path8,syn_Path14,sim_dow))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path65'}, inplace=True)
df_data_sim.rename(columns={4:'Path66'}, inplace=True)
df_data_sim.rename(columns={5:'Wind'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
lines = ['Path65','Path66']
upper = [3100,4300]
lower = [-2210,-500]
for line in lines:
name='predicted_' + str(line)
locals()[name]=[]
for line in lines:
predicted=[]
rc = np.shape(jan2.loc[:,'Wind':])
n = rc[1]
y = df_data_sim.loc[:,line]
line_index = lines.index(line)
#regression names
name_1='jan_reg_6566' + str(line)
name_2='feb_reg_6566' + str(line)
name_3='mar_reg_6566' + str(line)
name_4='apr_reg_6566' + str(line)
name_5='may_reg_6566' + str(line)
name_6='jun_reg_6566' + str(line)
name_7='jul_reg_6566' + str(line)
name_8='aug_reg_6566' + str(line)
name_9='sep_reg_6566' + str(line)
name_10='oct_reg_6566' + str(line)
name_11='nov_reg_6566' + str(line)
name_12='dec_reg_6566' + str(line)
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
else:
s = dec2.loc[i,'Wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_12].predict(s)
predicted = np.append(predicted,p)
if predicted[i] > upper[line_index]:
predicted[i] = upper[line_index]
elif predicted[i] < lower[line_index]:
predicted[i] = lower[line_index]
name='predicted_' + str(line)
locals()[name]=predicted
syn_Path65= predicted_Path65 + syn_residuals[:effect_sim_year*365,13]
syn_Path66 = predicted_Path66 + syn_residuals[:effect_sim_year*365,14]
bias = np.mean(syn_Path65) - np.mean(Path65_66_y[:,0])
syn_Path65 = syn_Path65 - bias
bias = np.mean(syn_Path66) - np.mean(Path65_66_y[:,1])
syn_Path66 = syn_Path66 - bias
###################################################
## PATH 46
###################################################
#Find the simulated data at the sites
col_46_T = ['TUCSON_T','PHOENIX_T','LAS VEGAS_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_46_W = ['TUCSON_W','PHOENIX_W','LAS VEGAS_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_46_T)
P46_sim_T=sim_weather[col_46_T].values
P46_sim_W=sim_weather[col_46_W].values
P46_sim_W =P46_sim_W *2.23694
sim_days = len(sim_weather)
P46_sim_T_F=(P46_sim_T * 9/5) +32
HDD_sim = np.maximum(0, 65 - P46_sim_T_F)
CDD_sim = np.maximum(0, P46_sim_T_F - 65)
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(P46_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(P46_sim_W,binary_HDD_sim)
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
sim_Hoover = pd.read_csv('Synthetic_streamflows/synthetic_discharge_Hoover.csv',header=None)
sim_Hoover=sim_Hoover.values
sim_Hoover = sim_Hoover[:effect_sim_year*365]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),sim_dow,sim_Hoover,syn_Path65,syn_Path66))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path46'}, inplace=True)
df_data_sim.rename(columns={4:'Weekday'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
y = df_data_sim.loc[:,'Path46']
predicted_Path46 =[]
rc = np.shape(jan2.loc[:,'Weekday':])
n = rc[1]
upper = 185000
lower = 48000
predicted=[]
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jan_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = feb_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = mar_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = apr_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = may_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jun_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jul_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = aug_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = sep_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = oct_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = nov_reg_46.predict(s)
predicted = np.append(predicted,p)
else:
s = dec2.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = dec_reg_46.predict(s)
predicted = np.append(predicted,p)
if predicted[i] > upper:
predicted[i] = upper
elif predicted[i] < lower:
predicted[i] = lower
predicted_Path46=predicted
syn_Path46=predicted_Path46+syn_residuals[:effect_sim_year*365,12]
bias = np.mean(syn_Path46) - np.mean(Path46_y)
syn_Path46 = syn_Path46 - bias
syn_Path46 = syn_Path46/24
#
################################
## Other CA PATHS
################################
col_ca_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T','FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_ca_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W','TUCSON_W','PHOENIX_W','LAS VEGAS_W','FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
num_cities = len(col_ca_T)
CA_sim_T=sim_weather[col_ca_T].values
CA_sim_W=sim_weather[col_ca_W].values
CA_sim_W =CA_sim_W *2.23694
CA_sim_T_F=(CA_sim_T * 9/5) +32
HDD_sim = np.maximum(0, 65 - CA_sim_T_F)
CDD_sim = np.maximum(0, CA_sim_T_F - 65)
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(CA_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(CA_sim_W,binary_HDD_sim)
#HDD_sim=HDD_sim[730:len(HDD_sim)-730]
#CDD_sim=CDD_sim[730:len(CDD_sim)-730]
#
#HDD_wind_sim=HDD_wind_sim[730:len(HDD_wind_sim)-730]
#CDD_wind_sim=CDD_wind_sim[730:len(CDD_wind_sim)-730]
collect_data=np.column_stack((sim_month,sim_day,sim_year,np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),np.zeros(effect_sim_year*365),sim_wind_daily,sim_BPA_hydro,sim_dow,syn_Path46,sim_Hoover,syn_Path65,syn_Path66))
collect_data_2=np.column_stack((HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
Combined=np.column_stack((collect_data,collect_data_2))
df_data_sim = pd.DataFrame(Combined)
df_data_sim.rename(columns={0:'Month'}, inplace=True)
df_data_sim.rename(columns={3:'Path61'}, inplace=True)
df_data_sim.rename(columns={4:'Path42'}, inplace=True)
df_data_sim.rename(columns={5:'Path24'}, inplace=True)
df_data_sim.rename(columns={6:'Path45'}, inplace=True)
df_data_sim.rename(columns={7:'BPA_wind'}, inplace=True)
jan2 = df_data_sim.loc[df_data_sim['Month'] == 1,:]
feb2 = df_data_sim.loc[df_data_sim['Month'] == 2,:]
mar2 = df_data_sim.loc[df_data_sim['Month'] == 3,:]
apr2 = df_data_sim.loc[df_data_sim['Month'] == 4,:]
may2 = df_data_sim.loc[df_data_sim['Month'] == 5,:]
jun2 = df_data_sim.loc[df_data_sim['Month'] == 6,:]
jul2 = df_data_sim.loc[df_data_sim['Month'] == 7,:]
aug2 = df_data_sim.loc[df_data_sim['Month'] == 8,:]
sep2 = df_data_sim.loc[df_data_sim['Month'] == 9,:]
oct2 = df_data_sim.loc[df_data_sim['Month'] == 10,:]
nov2 = df_data_sim.loc[df_data_sim['Month'] == 11,:]
dec2 = df_data_sim.loc[df_data_sim['Month'] == 12,:]
lines = ['Path61','Path42','Path24','Path45']
upper = [1940,98,92,340]
lower = [240,-400,-48,-190]
num_lines = len(lines)
for line in lines:
name='predicted_' + str(line)
locals()[name]=[]
for line in lines:
predicted=[]
rc = np.shape(jan2.loc[:,'BPA_wind':])
n = rc[1]
y = df_data_sim.loc[:,line]
line_index = lines.index(line)
#regression names
name_1='jan_reg_CA' + str(line)
name_2='feb_reg_CA' + str(line)
name_3='mar_reg_CA' + str(line)
name_4='apr_reg_CA' + str(line)
name_5='may_reg_CA' + str(line)
name_6='jun_reg_CA' + str(line)
name_7='jul_reg_CA' + str(line)
name_8='aug_reg_CA' + str(line)
name_9='sep_reg_CA' + str(line)
name_10='oct_reg_CA' + str(line)
name_11='nov_reg_CA' + str(line)
name_12='dec_reg_CA' + str(line)
for i in range(0,len(y)):
m = df_data_sim.loc[i,'Month']
if m==1:
s = jan2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_1].predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_2].predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_3].predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_4].predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_5].predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_6].predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_7].predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_8].predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_9].predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_10].predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov2.loc[i,'BPA_wind':]
s = np.reshape(s[:,None],(1,n))
p = locals()[name_11].predict(s)
predicted = np.append(predicted,p)
        else:
            s = dec2.loc[i,'BPA_wind':]
            s = np.reshape(s[:,None],(1,n))
            p = locals()[name_12].predict(s)
            predicted = np.append(predicted,p)
        if predicted[i] > upper[line_index]:
            predicted[i] = upper[line_index]
        elif predicted[i] < lower[line_index]:
            predicted[i] = lower[line_index]
    name='predicted_' + str(line)
    locals()[name]=predicted
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Planar Stacker domain."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import xml_tools
from lxml import etree
import numpy as np
_CLOSE = .01 # (Meters) Distance below which a thing is considered close.
_CONTROL_TIMESTEP = .01 # (Seconds)
_TIME_LIMIT = 10 # (Seconds)
_ARM_JOINTS = ['arm_root', 'arm_shoulder', 'arm_elbow', 'arm_wrist',
'finger', 'fingertip', 'thumb', 'thumbtip']
SUITE = containers.TaggedTasks()
def make_model(n_boxes):
"""Returns a tuple containing the model XML string and a dict of assets."""
xml_string = common.read_model('stacker.xml')
parser = etree.XMLParser(remove_blank_text=True)
mjcf = etree.XML(xml_string, parser)
# Remove unused boxes
for b in range(n_boxes, 4):
box = xml_tools.find_element(mjcf, 'body', 'box' + str(b))
box.getparent().remove(box)
return etree.tostring(mjcf, pretty_print=True), common.ASSETS
@SUITE.add('hard')
def stack_2(fully_observable=True, time_limit=_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns stacker task with 2 boxes."""
n_boxes = 2
physics = Physics.from_xml_string(*make_model(n_boxes=n_boxes))
task = Stack(n_boxes=n_boxes,
fully_observable=fully_observable,
random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, control_timestep=_CONTROL_TIMESTEP, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('hard')
def stack_4(fully_observable=True, time_limit=_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns stacker task with 4 boxes."""
n_boxes = 4
physics = Physics.from_xml_string(*make_model(n_boxes=n_boxes))
task = Stack(n_boxes=n_boxes,
fully_observable=fully_observable,
random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, control_timestep=_CONTROL_TIMESTEP, time_limit=time_limit,
**environment_kwargs)
class Physics(mujoco.Physics):
"""Physics with additional features for the Planar Manipulator domain."""
def bounded_joint_pos(self, joint_names):
"""Returns joint positions as (sin, cos) values."""
joint_pos = self.named.data.qpos[joint_names]
    return np.vstack([np.sin(joint_pos), np.cos(joint_pos)]).T
import numpy
from src.ppopt.utils.general_utils import *
def test_make_column_1():
test_case = make_column([1, 1, 1, 1])
correct_result = numpy.array([[1], [1], [1], [1]])
assert numpy.allclose(correct_result, test_case)
assert correct_result.shape == test_case.shape
def test_make_column_2():
k = numpy.ones((2, 2))
assert make_column(k).shape == (4, 1)
def test_make_column_3():
k = numpy.ones((2,))
assert make_column(k).shape == (2, 1)
def test_make_row_1():
test_case = make_row([1, 1, 1, 1])
correct_result = numpy.array([[1, 1, 1, 1]])
assert numpy.allclose(correct_result, test_case)
assert correct_result.shape == test_case.shape
def test_make_row_2():
k = numpy.ones((2, 2))
assert make_row(k).shape == (1, 4)
def test_make_row_3():
k = numpy.ones((2,))
assert make_row(k).shape == (1, 2)
def test_select_not_in_list_1():
    A = numpy.eye(5)
    # Assumed behavior (hypothetical assertion): selecting the rows of A not
    # in [0] keeps every row except the first.
    assert numpy.allclose(A[1:], select_not_in_list(A, [0]))
import argparse
import math
from datetime import datetime
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
import provider
import pickle
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR) # model
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import KDTree
parser = argparse.ArgumentParser()
parser.add_argument('--category', default='chair', help='Which class')
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet_triplet', help='Model name [default: pointnet_triplet]')
parser.add_argument('--log_dir', default='log_chair_triplet/', help='Log dir [default: log_chair_triplet/]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=401, help='Epoch to run [default: 401]')
parser.add_argument('--batch_size', type=int, default=8, help='Batch Size during training [default: 8]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial momentum [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
####For triplets
parser.add_argument('--margin', type=float, default=0.5, help='Margin for hinge loss [default: 0.5]')
parser.add_argument('--positives_per_query', type=int, default=2, help='Number of potential positives in each training tuple [default: 2]')
parser.add_argument('--negatives_per_query', type=int, default=13, help='Number of definite negatives in each training tuple [default: 13]')
parser.add_argument('--output_dim', type=int, default=256, help='Dimension of the output embedding [default: 256]')
FLAGS = parser.parse_args()
EPOCH_CNT = 0
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
##For triplet
POSITIVES_PER_QUERY= FLAGS.positives_per_query
NEGATIVES_PER_QUERY= FLAGS.negatives_per_query
MARGIN = FLAGS.margin
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(os.path.join(ROOT_DIR, 'models'), FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train_triplet.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
OBJ_CAT = FLAGS.category
TRAIN_FILE = '../candidate_generation/train_'+OBJ_CAT+'.h5'
TEST_FILE = '../candidate_generation/test_'+OBJ_CAT+'.h5'
TRAIN_DATA = provider.load_h5(TRAIN_FILE)
TEST_DATA = provider.load_h5(TEST_FILE)
TRAIN_CANDIDATES_FILE = 'generate_deformed_candidates/chamfer_triplet_train_'+OBJ_CAT+'.pickle'
TEST_CANDIDATES_FILE = 'generate_deformed_candidates/chamfer_triplet_test_'+OBJ_CAT+'.pickle'
pickle_in = open(TRAIN_CANDIDATES_FILE,"rb")
TRAIN_DICT = pickle.load(pickle_in)
pickle_in = open(TEST_CANDIDATES_FILE,"rb")
TEST_DICT = pickle.load(pickle_in)
OUTPUT_DIM = FLAGS.output_dim
np.random.seed(0)
global TRAINING_LATENT_VECTORS
TRAINING_LATENT_VECTORS=[]
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
    learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
query= MODEL.placeholder_inputs(BATCH_SIZE, 1, NUM_POINT)
positives= MODEL.placeholder_inputs(BATCH_SIZE, POSITIVES_PER_QUERY, NUM_POINT)
negatives= MODEL.placeholder_inputs(BATCH_SIZE, NEGATIVES_PER_QUERY, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
print (is_training_pl)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
print ("--- Get model and loss")
with tf.variable_scope("query_triplets") as scope:
vecs= tf.concat([query, positives, negatives],1)
print(vecs)
out_vecs, end_points= MODEL.get_model(vecs, is_training_pl, autoencoder=False, bn_decay=bn_decay, output_dim=OUTPUT_DIM)
print(out_vecs)
q_vec, pos_vecs, neg_vecs= tf.split(out_vecs, [1,POSITIVES_PER_QUERY,NEGATIVES_PER_QUERY],1)
# pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
# Get loss
loss = MODEL.triplet_loss(q_vec, pos_vecs, neg_vecs, MARGIN)
# loss = MODEL.chamfer_loss(pred, pointclouds_pl)
tf.summary.scalar('triplet_loss', loss)
print ("--- Get training operator")
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'), sess.graph)
# Init variables
init = tf.global_variables_initializer()
sess.run(init)
#sess.run(init, {is_training_pl: True})
ops = {'query': query,
'positives': positives,
'negatives': negatives,
'q_vec':q_vec,
'pos_vecs': pos_vecs,
'neg_vecs': neg_vecs,
'is_training_pl': is_training_pl,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch,
'end_points': end_points}
best_loss = 1e20
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer)
### Train with hard negatives
# if (epoch > 50):
# train_one_epoch(sess, ops, train_writer, use_hard_neg=True, recache=True)
# else:
# train_one_epoch(sess, ops, train_writer)
epoch_loss = eval_one_epoch(sess, ops, test_writer)
if epoch_loss < best_loss:
best_loss = epoch_loss
save_path = saver.save(sess, os.path.join(LOG_DIR, "best_model_epoch_%03d.ckpt"%(epoch)))
log_string("Model saved in file: %s" % save_path)
# Save the variables to disk.
if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
def train_one_epoch(sess, ops, train_writer, use_hard_neg=False, recache = False):
""" ops: dict mapping from string to tf ops """
global TRAINING_LATENT_VECTORS
is_training = True
# Sample and shuffle train samples
current_data, positives, negatives, idx = provider.get_current_data_arap(TRAIN_DATA, TRAIN_DICT, POSITIVES_PER_QUERY, NEGATIVES_PER_QUERY, NUM_POINT, shuffle=True)
log_string(str(datetime.now()))
num_batches = current_data.shape[0]//BATCH_SIZE
train_pcs = provider.get_current_data(TRAIN_DATA, NUM_POINT, shuffle=False)
if (use_hard_neg and len(TRAINING_LATENT_VECTORS)==0):
TRAINING_LATENT_VECTORS = get_latent_vectors(sess, ops)
num_hard_negs = 8
loss_sum = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_data = current_data[start_idx:end_idx, :, :]
batch_data= np.expand_dims(batch_data,axis=1)
curr_positives = positives[start_idx:end_idx, :, :, :]
if (use_hard_neg):
# print("Using hard negatives")
curr_idx = idx[start_idx:end_idx]
# current_queries = train_pcs[curr_idx]
# query_feat = get_feature_representation(current_queries, sess, ops)
curr_queries_latent_vector = TRAINING_LATENT_VECTORS[curr_idx]
curr_negatives = []
for j in range(BATCH_SIZE):
neg_idx = TRAIN_DICT["negatives"][curr_idx[j]] #returns a list
# if (len(neg_idx) == 0):
# continue
if (len(neg_idx) < NEGATIVES_PER_QUERY):
selected_idx = np.random.choice(neg_idx, NEGATIVES_PER_QUERY, replace=True)
else:
neg_latent_vec = TRAINING_LATENT_VECTORS[np.array(neg_idx)]
query_vec = curr_queries_latent_vector[j]
hard_negs = get_hard_negatives(query_vec, neg_latent_vec, neg_idx, num_hard_negs)
##Get negative pcs
if (len(neg_idx) - num_hard_negs < NEGATIVES_PER_QUERY):
selected_idx = np.random.choice(neg_idx, NEGATIVES_PER_QUERY, replace=False)
selected_idx[:num_hard_negs] = np.array(hard_negs)
else:
neg_idx = np.delete(np.array(neg_idx), np.where(np.isin(np.array(neg_idx) ,np.array(hard_negs))))
to_select_idx = np.arange(0,len(neg_idx))
np.random.shuffle(to_select_idx)
selected_idx = neg_idx[to_select_idx[0:NEGATIVES_PER_QUERY]]
selected_idx[:num_hard_negs] = np.array(hard_negs)
curr_neg_pcs = train_pcs[selected_idx]
curr_negatives.append(curr_neg_pcs)
curr_negatives = np.array(curr_negatives)
if (len(curr_negatives.shape) != 4 or curr_negatives.shape[0]!=BATCH_SIZE):
continue
else:
curr_negatives = negatives[start_idx:end_idx, :, :, :]
feed_dict = {ops['query']: batch_data,
ops['positives']: curr_positives,
ops['negatives']: curr_negatives,
ops['is_training_pl']: is_training}
summary, step, _, loss_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
loss_sum += loss_val
if (batch_idx+1)%10 == 0:
log_string(' -- %03d / %03d --' % (batch_idx+1, num_batches))
log_string('mean loss: %f' % (loss_sum / 10))
loss_sum = 0
def eval_one_epoch(sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
global EPOCH_CNT
is_training = False
# current_data = provider.get_current_data(TEST_DATA, NUM_POINT)
current_data, positives, negatives = provider.get_current_data_triplet(TEST_DATA, TEST_DICT, POSITIVES_PER_QUERY, NEGATIVES_PER_QUERY, NUM_POINT, shuffle=True)
num_batches = current_data.shape[0]//BATCH_SIZE
log_string(str(datetime.now()))
log_string('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT))
loss_sum = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_data = current_data[start_idx:end_idx, :, :]
batch_data = np.expand_dims(batch_data, axis=1)
curr_positives = positives[start_idx:end_idx, :, :, :]
curr_negatives = negatives[start_idx:end_idx, :, :, :]
feed_dict = {ops['query']: batch_data,
ops['positives']: curr_positives,
ops['negatives']: curr_negatives,
ops['is_training_pl']: is_training}
summary, step, loss_val = sess.run([ops['merged'], ops['step'],
ops['loss']], feed_dict=feed_dict)
test_writer.add_summary(summary, step)
loss_sum += loss_val
log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
EPOCH_CNT += 1
return loss_sum/float(num_batches)
def get_hard_negatives(query_vec, neg_latent_vecs, neg_idx, num_to_take):
neg_latent_vecs = np.array(neg_latent_vecs)
nbrs = KDTree(neg_latent_vecs)
distances, indices = nbrs.query(np.array([query_vec]), k=num_to_take)
hard_negs = np.squeeze(np.array(neg_idx)[indices[0]])
hard_negs = hard_negs.tolist()
return hard_negs
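# Usage sketch for get_hard_negatives (hypothetical shapes; the latent
# vectors are assumed to be fixed-length 1-D embeddings):
#
#   query_vec = TRAINING_LATENT_VECTORS[q_idx]             # shape (D,)
#   neg_vecs = TRAINING_LATENT_VECTORS[np.array(neg_idx)]  # shape (len(neg_idx), D)
#   hardest = get_hard_negatives(query_vec, neg_vecs, neg_idx, num_to_take=8)
#   # `hardest` lists the indices of the 8 negatives closest to the query,
#   # i.e. the hardest negatives under the current embedding.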
def get_latent_vectors(sess, ops):
print("Caching latent features.")
start_time = time.time()
is_training=False
train_idxs = np.arange(0, TRAIN_DATA.shape[0])
# SPDX-FileCopyrightText: Copyright 2021, <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the license found in the LICENSE.txt file in the root directory
# of this source tree.
# =======
# Imports
# =======
import numpy
# ==============================
# find interval with sign change
# ==============================
def find_interval_with_sign_change(f, bracket, num_bracket_trials, args=(), ):
"""
Finds an interval ``[x0, x1]`` in which ``f(x0)`` and ``f(x1)`` have
opposite signs. The interval is used for some root finding algorithms, such
as Brent and Chandrupatla method. Finding such interval is known as
*bracketing* the function.
If the initial interval is not a suitable *bracket*, it iterates up to
*num_bracket_trials* times. If no bracket is found within those trials,
it exits with a false status.
"""
# Initialization
bracket_found = False
# Interval bounds
x0 = bracket[0]
x1 = bracket[1]
# Initial bracket
f0 = f(x0, *args)
f1 = f(x1, *args)
# Trials
iterations = 0
while (not bracket_found) and (iterations < num_bracket_trials):
iterations += 1
if numpy.sign(f0) != numpy.sign(f1):
    # Endpoint values have opposite signs: a valid bracket was found.
    bracket_found = True
import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as linalg
from landlab import Component
# Things to add: 1. Explicit stability check.
# 2. Implicit handling of scenarios where kappa*dt exceeds critical step -
# subdivide dt automatically.
class PerronNLDiffuse(Component):
"""Nonlinear diffusion, following Perron (2011).
This module uses Taylor Perron's implicit (2011) method to solve the
nonlinear hillslope diffusion equation across a rectangular, regular grid
for a single timestep. Note it works with the mass flux implicitly, and
thus does not actually calculate it. Grid must be at least 5x5.
Boundary condition handling assumes each edge uses the same BC for each of
its nodes.
This component cannot yet handle looped boundary conditions, but all others
should be fine.
This component has KNOWN STABILITY ISSUES which will be resolved in a
future release; use at your own risk.
The primary method of this class is :func:`run_one_step`.
Examples
--------
>>> from landlab.components import PerronNLDiffuse
>>> from landlab import RasterModelGrid
>>> import numpy as np
>>> mg = RasterModelGrid((5, 5))
>>> z = mg.add_zeros("topographic__elevation", at="node")
>>> nl = PerronNLDiffuse(mg, nonlinear_diffusivity=1.)
>>> dt = 100.
>>> nt = 20
>>> uplift_rate = 0.001
>>> for i in range(nt):
... z[mg.core_nodes] += uplift_rate*dt
... nl.run_one_step(dt)
>>> z_target = np.array(
... [ 0. , 0. , 0. , 0. , 0. ,
... 0. , 0.00778637, 0.0075553 , 0.00778637, 0. ,
... 0. , 0.0075553 , 0.0078053 , 0.0075553 , 0. ,
... 0. , 0.00778637, 0.0075553 , 0.00778637, 0. ,
... 0. , 0. , 0. , 0. , 0. ])
>>> np.allclose(z, z_target)
True
References
----------
**Required Software Citation(s) Specific to this Component**
None Listed
**Additional References**
<NAME>. (2011). Numerical methods for nonlinear hillslope transport laws.
Journal of Geophysical Research 116(F2), 23 - 13.
https://dx.doi.org/10.1029/2010jf001801
"""
_name = "PerronNLDiffuse"
_info = {
"topographic__elevation": {
"dtype": float,
"intent": "inout",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "Land surface topographic elevation",
}
}
def __init__(
self,
grid,
nonlinear_diffusivity=0.01,
S_crit=33.0 * np.pi / 180.0,
rock_density=2700.0,
sed_density=2700.0,
):
"""
Parameters
----------
grid : RasterModelGrid
A Landlab raster grid
nonlinear_diffusivity : float, array or field name
The nonlinear diffusivity
S_crit : float (radians)
The critical hillslope angle
rock_density : float (kg*m**-3)
The density of intact rock
sed_density : float (kg*m**-3)
The density of the mobile (sediment) layer
"""
super(PerronNLDiffuse, self).__init__(grid)
self._bc_set_code = self._grid.bc_set_code
self._values_to_diffuse = "topographic__elevation"
self._kappa = nonlinear_diffusivity
self._rock_density = rock_density
self._sed_density = sed_density
self._S_crit = S_crit
self._uplift = 0.0
self._delta_x = grid.dx
self._delta_y = grid.dy
self._one_over_delta_x = 1.0 / self._delta_x
self._one_over_delta_y = 1.0 / self._delta_y
self._one_over_delta_x_sqd = self._one_over_delta_x ** 2.0
self._one_over_delta_y_sqd = self._one_over_delta_y ** 2.0
self._b = 1.0 / self._S_crit ** 2.0
ncols = grid.number_of_node_columns
self._ncols = ncols
nrows = grid.number_of_node_rows
self._nrows = nrows
nnodes = grid.number_of_nodes
self._nnodes = nnodes
ninteriornodes = grid.number_of_interior_nodes
ncorenodes = ninteriornodes - 2 * (ncols + nrows - 6)
self._ninteriornodes = ninteriornodes
self._interior_grid_width = ncols - 2
self._core_cell_width = ncols - 4
self._interior_corners = np.array(
[ncols + 1, 2 * ncols - 2, nnodes - 2 * ncols + 1, nnodes - ncols - 2]
)
_left_list = np.array(range(2 * ncols + 1, nnodes - 2 * ncols, ncols))
# ^these are still real IDs
_right_list = np.array(range(3 * ncols - 2, nnodes - 2 * ncols, ncols))
_bottom_list = np.array(range(ncols + 2, 2 * ncols - 2))
_top_list = np.array(range(nnodes - 2 * ncols + 2, nnodes - ncols - 2))
self._left_list = _left_list
self._right_list = _right_list
self._bottom_list = _bottom_list
self._top_list = _top_list
self._core_nodes = self._coreIDtoreal(np.arange(ncorenodes, dtype=int))
self._corenodesbyintIDs = self._realIDtointerior(self._core_nodes)
self._ncorenodes = len(self._core_nodes)
self._corner_interior_IDs = self._realIDtointerior(self._interior_corners)
# ^i.e., interior corners as interior IDs
self._bottom_interior_IDs = self._realIDtointerior(np.array(_bottom_list))
self._top_interior_IDs = self._realIDtointerior(np.array(_top_list))
self._left_interior_IDs = self._realIDtointerior(np.array(_left_list))
self._right_interior_IDs = self._realIDtointerior(np.array(_right_list))
# build an ID map to let us easily map the variables of the core nodes
# onto the operating matrix:
# This array is ninteriornodes long, but the IDs it contains are
# REAL IDs
operating_matrix_ID_map = np.empty((ninteriornodes, 9))
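# Each of the ninteriornodes rows holds the nine real node IDs of the 3x3
# stencil centred on that interior node, ordered bottom-left to top-right
# (southern row first, then the centre row, then the northern row).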
self._interior_IDs_as_real = self._interiorIDtoreal(np.arange(ninteriornodes))
for j in range(ninteriornodes):
i = self._interior_IDs_as_real[j]
operating_matrix_ID_map[j, :] = np.array(
[
(i - ncols - 1),
(i - ncols),
(i - ncols + 1),
(i - 1),
i,
(i + 1),
(i + ncols - 1),
(i + ncols),
(i + ncols + 1),
]
)
self._operating_matrix_ID_map = operating_matrix_ID_map
self._operating_matrix_core_int_IDs = self._realIDtointerior(
operating_matrix_ID_map[self._corenodesbyintIDs, :]
)
# ^shape(ncorenodes,9)
# see below for corner and edge maps
# Build masks for the edges and corners to be applied to the operating
# matrix map.
# Antimasks are the boundary nodes, masks are "normal"
self._topleft_mask = [1, 2, 4, 5]
topleft_antimask = [0, 3, 6, 7, 8]
self._topright_mask = [0, 1, 3, 4]
topright_antimask = [2, 5, 6, 7, 8]
self._bottomleft_mask = [4, 5, 7, 8]
bottomleft_antimask = [0, 1, 2, 3, 6]
self._bottomright_mask = [3, 4, 6, 7]
bottomright_antimask = [0, 1, 2, 5, 8]
self._corners_masks = np.vstack(
(
self._bottomleft_mask,
self._bottomright_mask,
self._topleft_mask,
self._topright_mask,
)
)
# ^(each_corner,mask_for_each_corner)
self._corners_antimasks = np.vstack(
(
bottomleft_antimask,
bottomright_antimask,
topleft_antimask,
topright_antimask,
)
)
# ^so shape becomes (4,5)
self._left_mask = [1, 2, 4, 5, 7, 8]
self._left_antimask = [0, 3, 6]
self._top_mask = [0, 1, 2, 3, 4, 5]
self._top_antimask = [6, 7, 8]
self._right_mask = [0, 1, 3, 4, 6, 7]
self._right_antimask = [2, 5, 8]
self._bottom_mask = [3, 4, 5, 6, 7, 8]
self._bottom_antimask = [0, 1, 2]
self._antimask_corner_position = [0, 2, 2, 4]
# ^this is the position w/i the corner antimasks that the true corner
# actually occupies
self._modulator_mask = np.array(
[-ncols - 1, -ncols, -ncols + 1, -1, 0, 1, ncols - 1, ncols, ncols + 1]
)
self.updated_boundary_conditions()
def updated_boundary_conditions(self):
"""Call if grid BCs are updated after component instantiation."""
grid = self._grid
nrows = self._nrows
ncols = self._ncols
# ^Set up terms for BC handling (still feels very clumsy)
bottom_edge = grid.nodes_at_bottom_edge[1:-1]
top_edge = grid.nodes_at_top_edge[1:-1]
left_edge = grid.nodes_at_left_edge[1:-1]
right_edge = grid.nodes_at_right_edge[1:-1]
self._bottom_flag = 1
self._top_flag = 1
self._left_flag = 1
self._right_flag = 1
# self._corner_flags = [1,1,1,1] #In ID order, so BL,BR,TL,TR
if np.all(grid.status_at_node[bottom_edge] == 4):
# ^This should be all of them, or none of them
self._bottom_flag = 4
elif np.all(grid.status_at_node[bottom_edge] == 3):
self._bottom_flag = 3
elif np.all(grid.status_at_node[bottom_edge] == 2):
self._bottom_flag = 2
elif np.all(grid.status_at_node[bottom_edge] == 1):
pass
else:
raise NameError(
"Different cells on the same grid edge have "
"different boundary statuses"
)
# Note this could get fraught if we need to open a cell to let
# water flow out...
if np.all(grid.status_at_node[top_edge] == 4):
self._top_flag = 4
elif np.all(grid.status_at_node[top_edge] == 3):
self._top_flag = 3
elif np.all(grid.status_at_node[top_edge] == 2):
self._top_flag = 2
elif np.all(grid.status_at_node[top_edge] == 1):
pass
else:
raise NameError(
"Different cells on the same grid edge have "
"different boundary statuses"
)
if np.all(grid.status_at_node[left_edge] == 4):
self._left_flag = 4
elif np.all(grid.status_at_node[left_edge] == 3):
self._left_flag = 3
elif np.all(grid.status_at_node[left_edge] == 2):
self._left_flag = 2
elif np.all(grid.status_at_node[left_edge] == 1):
pass
else:
raise NameError(
"Different cells on the same grid edge have "
"different boundary statuses"
)
if np.all(grid.status_at_node[right_edge] == 4):
self._right_flag = 4
elif np.all(grid.status_at_node[right_edge] == 3):
self._right_flag = 3
elif np.all(grid.status_at_node[right_edge] == 2):
self._right_flag = 2
elif np.all(grid.status_at_node[right_edge] == 1):
pass
else:
raise NameError(
"Different cells on the same grid edge have "
"different boundary statuses"
)
self._fixed_grad_BCs_present = (
self._bottom_flag == 2
or self._top_flag == 2
or self._left_flag == 2
or self._right_flag == 2
)
self._looped_BCs_present = (
self._bottom_flag == 3
or self._top_flag == 3
or self._left_flag == 3
or self._right_flag == 3
)
if self._fixed_grad_BCs_present:
if self._values_to_diffuse != grid.fixed_gradient_of:
raise ValueError(
"Boundary conditions set in the grid don't "
"apply to the data the diffuser is trying to "
"work with"
)
if np.any(grid.status_at_node == 2):
self._fixed_grad_offset_map = np.empty(nrows * ncols, dtype=float)
self._fixed_grad_anchor_map = np.empty_like(self._fixed_grad_offset_map)
self._fixed_grad_offset_map[
grid.fixed_gradient_node_properties["boundary_node_IDs"]
] = grid.fixed_gradient_node_properties["values_to_add"]
self._corner_flags = grid.status_at_node[[0, ncols - 1, -ncols, -1]]
op_mat_just_corners = self._operating_matrix_ID_map[
self._corner_interior_IDs, :
]
op_mat_cnr0 = op_mat_just_corners[0, self._bottomleft_mask]
op_mat_cnr1 = op_mat_just_corners[1, self._bottomright_mask]
op_mat_cnr2 = op_mat_just_corners[2, self._topleft_mask]
op_mat_cnr3 = op_mat_just_corners[3, self._topright_mask]
op_mat_just_active_cnrs = np.vstack(
(op_mat_cnr0, op_mat_cnr1, op_mat_cnr2, op_mat_cnr3)
)
self._operating_matrix_corner_int_IDs = self._realIDtointerior(
op_mat_just_active_cnrs
)
# ^(4corners,4nodesactivepercorner)
self._operating_matrix_bottom_int_IDs = self._realIDtointerior(
self._operating_matrix_ID_map[self._bottom_interior_IDs, :][
:, self._bottom_mask
]
)
# ^(nbottomnodes,6activenodeseach)
self._operating_matrix_top_int_IDs = self._realIDtointerior(
self._operating_matrix_ID_map[self._top_interior_IDs, :][:, self._top_mask]
)
self._operating_matrix_left_int_IDs = self._realIDtointerior(
self._operating_matrix_ID_map[self._left_interior_IDs, :][
:, self._left_mask
]
)
self._operating_matrix_right_int_IDs = self._realIDtointerior(
self._operating_matrix_ID_map[self._right_interior_IDs, :][
:, self._right_mask
]
)
def _gear_timestep(self, timestep_in, new_grid):
"""This method allows the gearing between the model run step and the
component (shorter) step.
The method becomes unstable if S>Scrit, so we test to prevent
this. We implicitly assume the initial condition does not
contain slopes > Scrit. If the method persistently explodes,
this may be the problem.
"""
extended_elevs = np.empty(self._grid.number_of_nodes + 1, dtype=float)
extended_elevs[-1] = np.nan
node_neighbors = self._grid.active_adjacent_nodes_at_node
extended_elevs[:-1] = new_grid["node"][self._values_to_diffuse]
max_offset = np.nanmax(
np.fabs(
extended_elevs[:-1][node_neighbors]
- extended_elevs[:-1].reshape((self._grid.number_of_nodes, 1))
)
)
if max_offset > np.tan(self._S_crit) * min(self._grid.dx, self._grid.dy):
# ^using S not tan(S) adds a buffer - but not appropriate
self._internal_repeats = (
int(
max_offset
// (np.tan(self._S_crit) * min(self._grid.dx, self._grid.dy))
)
+ 1
)
# now we rig it so the actual timestep is an integer divisor
# of T_in:
self._delta_t = timestep_in / self._internal_repeats
self._uplift_per_step = (
new_grid["node"][self._values_to_diffuse]
- self._grid["node"][self._values_to_diffuse]
) / self._internal_repeats
if self._internal_repeats > 10000:
raise ValueError(
"""Uplift rate is too high; solution is not
stable!!"""
)
else:
self._internal_repeats = 1
self._delta_t = timestep_in
self._uplift_per_step = (
new_grid["node"][self._values_to_diffuse]
- self._grid["node"][self._values_to_diffuse]
)
return self._delta_t
def _set_variables(self, grid):
"""This function sets the variables needed for update().
Now vectorized, should run faster. At the moment, this method
can only handle fixed value BCs.
"""
n_interior_nodes = grid.number_of_interior_nodes
# Initialize the local builder lists
_mat_RHS = np.zeros(n_interior_nodes)
try:
elev = grid["node"][self._values_to_diffuse]
except KeyError:
raise NameError("elevations not found in grid!")
try:
_delta_t = self._delta_t
except AttributeError:
raise NameError(
"""Timestep not set! Call _gear_timestep(tstep)
after initializing the component, but before
running it."""
)
_one_over_delta_x = self._one_over_delta_x
_one_over_delta_x_sqd = self._one_over_delta_x_sqd
_one_over_delta_y = self._one_over_delta_y
_one_over_delta_y_sqd = self._one_over_delta_y_sqd
_kappa = self._kappa
_b = self._b
_S_crit = self._S_crit
_core_nodes = self._core_nodes
corenodesbyintIDs = self._corenodesbyintIDs
operating_matrix_core_int_IDs = self._operating_matrix_core_int_IDs
operating_matrix_corner_int_IDs = self._operating_matrix_corner_int_IDs
_interior_corners = self._interior_corners
corners_antimasks = self._corners_antimasks
corner_interior_IDs = self._corner_interior_IDs
modulator_mask = self._modulator_mask
corner_flags = self._corner_flags
bottom_interior_IDs = self._bottom_interior_IDs
top_interior_IDs = self._top_interior_IDs
left_interior_IDs = self._left_interior_IDs
right_interior_IDs = self._right_interior_IDs
bottom_antimask = self._bottom_antimask
_bottom_list = self._bottom_list
top_antimask = self._top_antimask
_top_list = self._top_list
left_antimask = self._left_antimask
_left_list = self._left_list
right_antimask = self._right_antimask
_right_list = self._right_list
# Need to modify the "effective" values of the edge nodes if any of
# the edges are inactive:
if self._bottom_flag == 4:
bottom_edge, inside_bottom_edge = grid.nodes[(0, 1), :]
elev[bottom_edge] = elev[inside_bottom_edge]
# corners are special cases, and assumed linked to the bottom and
# top edge BCs...
elev[bottom_edge[0]] = elev[inside_bottom_edge[1]]
elev[bottom_edge[-1]] = elev[inside_bottom_edge[-2]]
if self._top_flag == 4:
top_edge, inside_top_edge = grid.nodes[(-1, -2), :]
elev[top_edge] = elev[inside_top_edge]
# corners are special cases, and assumed linked to the bottom and
# top edge BCs...
elev[top_edge[0]] = elev[inside_top_edge[1]]
elev[top_edge[-1]] = elev[inside_top_edge[-2]]
if self._left_flag == 4:
left_edge = grid.nodes[1:-1, 0]
inside_left_edge = grid.nodes[1:-1, 1]
elev[left_edge] = elev[inside_left_edge]
if self._right_flag == 4:
right_edge = grid.nodes[1:-1, -1]
inside_right_edge = grid.nodes[1:-1, -2]
elev[right_edge] = elev[inside_right_edge]
# replacing loop:
cell_neighbors = grid.active_adjacent_nodes_at_node
# ^E,N,W,S
cell_diagonals = grid.diagonal_adjacent_nodes_at_node # NE,NW,SW,SE
# ^this should be dealt with by active_neighbors... (skips bad nodes)
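# Centred finite differences on the 3x3 stencil: _z_x and _z_y are first
# derivatives, _z_xx and _z_yy second derivatives, and _z_xy the mixed
# derivative estimated from the four diagonal neighbours.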
_z_x = (
(elev[cell_neighbors[:, 0]] - elev[cell_neighbors[:, 2]])
* 0.5
* _one_over_delta_x
)
_z_y = (
(elev[cell_neighbors[:, 1]] - elev[cell_neighbors[:, 3]])
* 0.5
* _one_over_delta_y
)
_z_xx = (
elev[cell_neighbors[:, 0]] - 2.0 * elev + elev[cell_neighbors[:, 2]]
) * _one_over_delta_x_sqd
_z_yy = (
elev[cell_neighbors[:, 1]] - 2.0 * elev + elev[cell_neighbors[:, 3]]
) * _one_over_delta_y_sqd
_z_xy = (
(
elev[cell_diagonals[:, 0]]
- elev[cell_diagonals[:, 1]]
- elev[cell_diagonals[:, 3]]
+ elev[cell_diagonals[:, 2]]
)
* 0.25
* _one_over_delta_x
* _one_over_delta_y
)
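# Since _b = 1/S_crit**2, _d = 1/(1 - (|grad z|/S_crit)**2) is the
# nonlinear amplification factor of Perron (2011); it diverges as the
# local slope approaches the critical slope S_crit.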
_d = 1.0 / (1.0 - _b * (_z_x * _z_x + _z_y * _z_y))
_abd_sqd = _kappa * _b * _d * _d
_F_ij = -2.0 * _kappa * _d * (
_one_over_delta_x_sqd + _one_over_delta_y_sqd
) - 4.0 * _abd_sqd * (
_z_x * _z_x * _one_over_delta_x_sqd + _z_y * _z_y * _one_over_delta_y_sqd
)
_F_ijminus1 = (
_kappa * _d * _one_over_delta_x_sqd
- _abd_sqd * _z_x * (_z_xx + _z_yy) * _one_over_delta_x
- 4.0
* _abd_sqd
* _b
* _d
* (_z_x * _z_x * _z_xx + _z_y * _z_y * _z_yy + 2.0 * _z_x * _z_y * _z_xy)
* _z_x
* _one_over_delta_x
- 2.0
* _abd_sqd
* (
_z_x * _z_xx * _one_over_delta_x
- _z_x * _z_x * _one_over_delta_x_sqd
+ _z_y * _z_xy * _one_over_delta_x
)
)
_F_ijplus1 = (
_kappa * _d * _one_over_delta_x_sqd
+ _abd_sqd * _z_x * (_z_xx + _z_yy) * _one_over_delta_x
+ 4.0
* _abd_sqd
* _b
* _d
* (_z_x * _z_x * _z_xx + _z_y * _z_y * _z_yy + 2.0 * _z_x * _z_y * _z_xy)
* _z_x
* _one_over_delta_x
+ 2.0
* _abd_sqd
* (
_z_x * _z_xx * _one_over_delta_x
+ _z_x * _z_x * _one_over_delta_x_sqd
+ _z_y * _z_xy * _one_over_delta_x
)
)
_F_iminus1j = (
_kappa * _d * _one_over_delta_y_sqd
- _abd_sqd * _z_y * (_z_xx + _z_yy) * _one_over_delta_y
- 4.0
* _abd_sqd
* _b
* _d
* (_z_x * _z_x * _z_xx + _z_y * _z_y * _z_yy + 2.0 * _z_x * _z_y * _z_xy)
* _z_y
* _one_over_delta_y
- 2.0
* _abd_sqd
* (
_z_y * _z_yy * _one_over_delta_y
- _z_y * _z_y * _one_over_delta_y_sqd
+ _z_x * _z_xy * _one_over_delta_y
)
)
_F_iplus1j = (
_kappa * _d * _one_over_delta_y_sqd
+ _abd_sqd * _z_y * (_z_xx + _z_yy) * _one_over_delta_y
+ 4.0
* _abd_sqd
* _b
* _d
* (_z_x * _z_x * _z_xx + _z_y * _z_y * _z_yy + 2.0 * _z_x * _z_y * _z_xy)
* _z_y
* _one_over_delta_y
+ 2.0
* _abd_sqd
* (
_z_y * _z_yy * _one_over_delta_y
+ _z_y * _z_y * _one_over_delta_y_sqd
+ _z_x * _z_xy * _one_over_delta_y
)
)
_F_iplus1jplus1 = _abd_sqd * _z_x * _z_y * _one_over_delta_x * _one_over_delta_y
_F_iminus1jminus1 = _F_iplus1jplus1
_F_iplus1jminus1 = -_F_iplus1jplus1
_F_iminus1jplus1 = _F_iplus1jminus1
_equ_RHS_calc_frag = (
_F_ij * elev
+ _F_ijminus1 * elev[cell_neighbors[:, 2]]
+ _F_ijplus1 * elev[cell_neighbors[:, 0]]
+ _F_iminus1j * elev[cell_neighbors[:, 3]]
+ _F_iplus1j * elev[cell_neighbors[:, 1]]
+ _F_iminus1jminus1 * elev[cell_diagonals[:, 2]]
+ _F_iplus1jplus1 * elev[cell_diagonals[:, 0]]
+ _F_iplus1jminus1 * elev[cell_diagonals[:, 1]]
+ _F_iminus1jplus1 * elev[cell_diagonals[:, 3]]
)
# NB- all _z_... and _F_... variables are nnodes long, and thus use
# real IDs (tho calcs will be flawed for Bnodes)
# RHS of equ 6 (see para [20])
_func_on_z = self._rock_density / self._sed_density * self._uplift + _kappa * (
# note the parenthesised S_crit**2: the slope term is |grad z|**2 / S_crit**2
(_z_xx + _z_yy) / (1.0 - (_z_x * _z_x + _z_y * _z_y) / (_S_crit * _S_crit))
+ 2.0
* (_z_x * _z_x * _z_xx + _z_y * _z_y * _z_yy + 2.0 * _z_x * _z_y * _z_xy)
/ (
    _S_crit
    * _S_crit
    * (1.0 - (_z_x * _z_x + _z_y * _z_y) / (_S_crit * _S_crit)) ** 2.0
)
)
# Remember, the RHS is getting wiped each loop as part of
# self._set_variables()
# _mat_RHS is ninteriornodes long, but were only working on a
# ncorenodes long subset here
_mat_RHS[corenodesbyintIDs] += elev[_core_nodes] + _delta_t * (
_func_on_z[_core_nodes] - _equ_RHS_calc_frag[_core_nodes]
)
low_row = (
np.vstack((_F_iminus1jminus1, _F_iminus1j, _F_iminus1jplus1)) * -_delta_t
)
mid_row = np.vstack(
(-_delta_t * _F_ijminus1, 1.0 - _delta_t * _F_ij, -_delta_t * _F_ijplus1)
)
top_row = np.vstack((_F_iplus1jminus1, _F_iplus1j, _F_iplus1jplus1)) * -_delta_t
nine_node_map = np.vstack((low_row, mid_row, top_row)).T
# ^Note shape is (nnodes,9); it's realID indexed
core_op_mat_row = np.repeat(corenodesbyintIDs, 9)
core_op_mat_col = operating_matrix_core_int_IDs.astype(int).flatten()
core_op_mat_data = nine_node_map[_core_nodes, :].flatten()
# Now the interior corners; BL,BR,TL,TR
_mat_RHS[corner_interior_IDs] += elev[_interior_corners] + _delta_t * (
_func_on_z[_interior_corners] - _equ_RHS_calc_frag[_interior_corners]
)
corners_op_mat_row = np.repeat(self._corner_interior_IDs, 4)
corners_op_mat_col = operating_matrix_corner_int_IDs.astype(int).flatten()
corners_op_mat_data = nine_node_map[_interior_corners, :][
(np.arange(4).reshape((4, 1)), self._corners_masks)
].flatten()
# ^1st index gives (4,9), 2nd reduces to (4,4), then flattened
for i in range(4): # loop over each corner, as so few
# Note that this ONLY ADDS THE VALUES FOR THE TRUE GRID CORNERS.
# The sides get done in the edge tests, below.
if corner_flags[i] == 1:
true_corner = self._antimask_corner_position[i]
_mat_RHS[corner_interior_IDs[i]] -= _delta_t * np.sum(
nine_node_map[_interior_corners[i], :][
corners_antimasks[i, true_corner]
]
* elev[
_interior_corners[i]
+ modulator_mask[corners_antimasks[i, true_corner]]
]
)
elif corner_flags[i] == 4 or corner_flags[i] == 3:
# ^inactive boundary cell
# Actually the easiest case! Equivalent to fixed gradient,
# but the gradient is zero, so material only goes in the linked
# cell. And because it's a true corner, that linked cell
# doesn't appear in the interior matrix at all!
pass
elif corner_flags[i] == 2:
true_corner = self._antimask_corner_position[i]
_mat_RHS[corner_interior_IDs[i]] -= _delta_t * np.sum(
nine_node_map[_interior_corners[i], :][
corners_antimasks[i, true_corner]
]
* self._fixed_grad_offset_map[
_interior_corners[i]
+ modulator_mask[corners_antimasks[i, true_corner]]
]
)
else:
raise NameError(
"""Sorry! This module cannot yet handle fixed
gradient or looped BCs..."""
)
# Todo: handle these BCs properly, once the grid itself works with
# them.
# Can follow old routines; see self.set_bc_cell() commented out
# method below.
# Now the edges
_mat_RHS[bottom_interior_IDs] += elev[_bottom_list] + _delta_t * (
_func_on_z[_bottom_list] - _equ_RHS_calc_frag[_bottom_list]
)
_mat_RHS[top_interior_IDs] += elev[_top_list] + _delta_t * (
_func_on_z[_top_list] - _equ_RHS_calc_frag[_top_list]
)
_mat_RHS[left_interior_IDs] += elev[_left_list] + _delta_t * (
_func_on_z[_left_list] - _equ_RHS_calc_frag[_left_list]
)
_mat_RHS[right_interior_IDs] += elev[_right_list] + _delta_t * (
_func_on_z[_right_list] - _equ_RHS_calc_frag[_right_list]
)
bottom_op_mat_row = np.repeat(bottom_interior_IDs, 6)
top_op_mat_row = | np.repeat(top_interior_IDs, 6) | numpy.repeat |
"""
This script evaluates a trained model checkpoint on the validation/test
set, computing the COCO metrics as well as the per-class mAP (IoU=0.5).
"""
import os
import json
import torch
from tqdm import tqdm
import numpy as np
import transforms
from network_files import FasterRCNN
from backbone import resnet50_fpn_backbone
from my_dataset import VOC2012DataSet
from train_utils import get_coco_api_from_dataset, CocoEvaluator
def summarize(self, catId=None):
"""
Compute and display summary metrics for evaluation results.
Note this function can *only* be applied with the default parameter settings
"""
def _summarize(ap=1, iouThr=None, areaRng='all', maxDets=100):
p = self.params
iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap == 1 else '(AR)'
iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
if iouThr is None else '{:0.2f}'.format(iouThr)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
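# Axis convention of the pycocotools eval arrays:
#   T = IoU thresholds, R = recall thresholds, K = categories,
#   A = area ranges, M = maxDets settings.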
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = self.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
if isinstance(catId, int):
s = s[:, :, catId, aind, mind]
else:
s = s[:, :, :, aind, mind]
else:
# dimension of recall: [TxKxAxM]
s = self.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
if isinstance(catId, int):
s = s[:, catId, aind, mind]
else:
s = s[:, :, aind, mind]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
"""Performs face alignment and stores face thumbnails in the output directory."""
# MIT License
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
import sys
import detect_face
# import facenet
import facenet.src.facenet as facenet
import numpy as np
import tensorflow as tf
from scipy import misc
def main(args):
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Store some git revision info in a text file in the log directory
src_path, _ = os.path.split(os.path.realpath(__file__))
facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
dataset = facenet.get_dataset(args.input_dir)
print('Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7]  # thresholds for the three cascade stages
factor = 0.709 # scale factor
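# The three thresholds gate the P-Net, R-Net and O-Net stages of the MTCNN
# cascade in turn; `factor` is the downscaling ratio between successive
# levels of the image pyramid.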
# Add a random key to the filename to allow alignment using multiple processes
random_key = np.random.randint(0, high=99999)
bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)
with open(bounding_boxes_filename, "w") as text_file:
nrof_images_total = 0
nrof_successfully_aligned = 0
if args.random_order:
random.shuffle(dataset)
for cls in dataset:
output_class_dir = os.path.join(output_dir, cls.name)
if not os.path.exists(output_class_dir):
os.makedirs(output_class_dir)
if args.random_order:
random.shuffle(cls.image_paths)
for image_path in cls.image_paths:
nrof_images_total += 1
filename = os.path.splitext(os.path.split(image_path)[1])[0]
output_filename = os.path.join(output_class_dir, filename + '.png')
print(image_path)
if not os.path.exists(output_filename):
try:
img = misc.imread(image_path)
except (IOError, ValueError, IndexError) as e:
errorMessage = '{}: {}'.format(image_path, e)
print(errorMessage)
else:
if img.ndim < 2:
print('Unable to align "%s"' % image_path)
text_file.write('%s\n' % (output_filename))
continue
if img.ndim == 2:
img = facenet.to_rgb(img)
img = img[:, :, 0:3]
bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
if nrof_faces > 0:
det = bounding_boxes[:, 0:4]
det_arr = []
img_size = np.asarray(img.shape)[0:2]
if nrof_faces > 1:
if args.detect_multiple_faces:
for i in range(nrof_faces):
det_arr.append(np.squeeze(det[i]))
else:
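# Keep a single face: score each detection by its area minus twice its
# squared distance from the image centre, then keep the arg-max, which
# prefers large, centred faces.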
bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
img_center = img_size / 2
offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
(det[:, 1] + det[:, 3]) / 2 - img_center[0]])
offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
index = np.argmax(
bounding_box_size - offset_dist_squared * 2.0) # some extra weight on the centering
det_arr.append(det[index, :])
else:
det_arr.append(np.squeeze(det))
for i, det in enumerate(det_arr):
det = np.squeeze(det)
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - args.margin / 2, 0)
bb[1] = np.maximum(det[1] - args.margin / 2, 0)
bb[2] = np.minimum(det[2] + args.margin / 2, img_size[1])
bb[3] = | np.minimum(det[3] + args.margin / 2, img_size[0]) | numpy.minimum |
##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
"""
Test suite for the util.py module.
The tests must be linked with a function space class object in the setUp method:
to run the use:
from esys.bruce import Brick
class Test_utilOnBruce(Test_util_no_tagged_data):
def setUp(self):
self.domain = Brick(10,10,13)
self.functionspace = ContinuousFunction(self.domain)
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test_utilOnBruce))
unittest.TextTestRunner(verbosity=2).run(suite)
This test assumes that samples with x_0 coordinate 0 are tagged with 1 and all samples tagged with 1 have x_0
coordinate 0.
:note: at this stage this test will not pass as it tests for functionality that has not been implemented yet. It also
does not test the full functionality of util.py yet.
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
"""
__author__="<NAME>, <EMAIL>"
import esys.escriptcore.utestselect as unittest
import numpy
from esys.escript import *
from test_util_base import Test_util_base, Test_util_values
from test_util_reduction_new import Test_util_reduction_new
from test_util_unary_new import Test_util_unary_new
from test_util_binary_new import Test_util_binary_new
from test_util_binary_leftover import Test_util_binary_leftover
## these aspects are test in the _new tests
#from test_util_overloaded_binary_no_tagged_data import Test_util_overloaded_binary_no_tagged_data
#from test_util_overloaded_binary_with_tagged_data import Test_util_overloaded_binary_with_tagged_data
#from test_util_unary_no_tagged_data import Test_util_unary_no_tagged_data
#from test_util_unary_with_tagged_data import Test_util_unary_with_tagged_data
#from test_util_binary_no_tagged_data import Test_util_binary_no_tagged_data
#from test_util_binary_with_tagged_data import Test_util_binary_with_tagged_data
from test_util_spatial_functions1 import Test_Util_SpatialFunctions_noGradOnBoundary_noContact
from test_util_spatial_functions2 import Test_Util_SpatialFunctions_noGradOnBoundary
from test_util_spatial_functions3 import Test_Util_SpatialFunctions
from test_util_slicing_no_tagged_data import Test_util_slicing_no_tagged_data
from test_util_slicing_with_tagged_data import Test_util_slicing_with_tagged_data
class Test_util_reduction(Test_util_reduction_new):
""" test for reduction operation Lsup,sup,inf for all data types"""
pass
class Test_util_unary(Test_util_unary_new):
""" all unary tests """
pass
class Test_util_binary(Test_util_binary_new, Test_util_binary_leftover):
"""
test for all binary operation
"""
pass
## Testing of these ops is now in Test_util_binary
#class Test_util_overloaded_binary(Test_util_overloaded_binary_no_tagged_data,Test_util_overloaded_binary_with_tagged_data):
#"""test for all overloaded operation"""
#pass
class Test_util(Test_util_unary_new,Test_util_reduction_new, Test_util_binary):
"""all tests"""
pass
class Test_util_overloaded_binary_still_failing(Test_util_base):
"""
these overloaded operations still fail!
- wrong return value of Data binaries (Mantis 0000054)
"""
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(-4.93686078973,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([0.51662736235119944, 2.8171396846123073])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-4.4202334273802917, -2.1197211051191838]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(-2.22764991169,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[2.0746979587719538, 0.99992890307042437, -2.3128078094931848, -4.0103712739722654,
4.8853529531011013],
[0.09856857946648212, 0.73520899085847624, -3.6585265509750844, 3.0095320582437939, 3.4125902906059444],
[1.4894150898632059,
-1.4124339049368793, 1.5397397961722188, 4.8841402613336111, 1.1241155288598881], [2.8283598865494408,
1.5980765295723476,
-1.0022373011497274, -2.0622178471715067, 4.9699555072046042]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.15295195292152819, -1.2277210086230577, -4.5404577211866668, -6.2380211856657475,
2.6577030414076193],
[-2.1290813322269999, -1.4924409208350058, -5.8861764626685664, 0.78188214655031185, 1.1849403789124624],
[-0.73823482183027611,
-3.6400838166303613, -0.68791011552126324, 2.6564903496401291, -1.103534382833594], [0.60070997485595878,
-0.62957338212113445,
-3.2298872128432095, -4.2898677588649887, 2.7423055955111222]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(-4.67318656609,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[3.9409337165894076, 1.6101568824796857], [1.2441782896909706, 1.2872758759353298]],
[[4.022494973005406,
-2.758155583474049], [1.8311643900357311, 4.0940647266277157]], [[2.5378127449303243, 0.063283784588161751],
[4.5495644157820809,
2.8673770080506742]], [[-0.93484143473477577, 4.914438575705228], [-1.951066895455166, -1.2021165219313259]],
[[-0.4220608661301819, -4.9682501775464418], [0.98338081352961559, 3.4054674805751066]], [[3.9967556325744127,
-4.7659141789100659],
[0.34265275409881024, -0.25226631819007572]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-0.73225284950136693, -3.0630296836110888], [-3.429008276399804, -3.3859106901554448]],
[[-0.6506915930853685, -7.4313421495648235], [-2.8420221760550435, -0.57912183946305884]],
[[-2.1353738211604503,
-4.6099027815026128], [-0.12362215030869361, -1.8058095580401003]], [[-5.6080280008255503,
0.24125200961445348],
[-6.6242534615459405, -5.8753030880221004]], [[-5.0952474322209564, -9.6414367436372164],
[-3.6898057525611589,
-1.2677190855156679]], [[-0.67643093351636185, -9.4391007450008395], [-4.3305338119919643,
-4.9254528842808503]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(4.16645075056,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[1.5917180025121436, -0.50082927718401749, 0.71261274386013618, 2.4216324938382936],
[2.5988764746053095,
0.15985324844397741, -2.1952754277135025, -2.1102730593254035], [4.7816092243808672, -3.1240954141765496,
4.0831220997721331, 2.4301203557965216]], [[3.4691826046114969, -2.4961081730013177, -4.9623977358253111,
2.2652744558918698],
[0.41830032681767193, -3.2186897293959649, -4.1590967541108324, -1.7789994379155196], [-0.17901184206486764,
-0.85223673399918809, 1.2515459884606104, -4.530305999148645]]], [[[-4.9028671865135838, 3.9106181278983012,
0.69716765577825246, 4.8537569187159395], [-2.8912890367657318, -4.8177854256421764, -4.3303142092509415,
-0.99481907472179198], [-1.2640734452454305, 4.8028129765204639, -2.5491771511234962, 3.2550469051981921]],
[[2.0572417475748761, 3.7392706991121187, 4.5778678295843704, 3.6658188498258486], [-2.7069743698567206,
-2.684769111460461, -3.0941141983763156, -2.1180719361316589], [-1.4744678905986119, 1.926687036555828,
2.2206999030392947, 0.72956973127168734]]], [[[-2.8290294475300151, -3.1467788245496631, 3.6471044178360348,
3.5237454065241209], [-1.6165850845596652, 1.2437746199742081, -2.8022357261752004, -1.9652183524467781],
[-2.3842126490032092, 3.7068998814751613, -1.389546865398994, -1.7153758702474589]], [[-1.0746517242894815,
-4.3575382718398723, 0.93160793707280121, 1.4002531109392731], [-1.5745690740270168, -3.4394046042905124,
4.2641517580348793, -1.7620679696550843], [-4.2559205627171135, 2.1912319337278863, 1.1987265764805723,
-3.2957352772592809]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[5.7581687530761378, 3.6656214733799768, 4.8790634944241305, 6.5880832444022879],
[6.7653272251693037, 4.3263039990079717, 1.9711753228504918, 2.0561776912385907], [8.9480599749448615,
1.0423553363874447, 8.2495728503361274, 6.5965711063605159]], [[7.6356333551754911, 1.6703425775626766,
-0.7959469852613168, 6.4317252064558641], [4.5847510773816662, 0.94776102116802941, 0.0073539964531619262,
2.3874513126484747], [3.9874389084991266, 3.3142140165648062, 5.4179967390246047, -0.36385524858465068]]],
[[[-0.7364164359495895, 8.0770688784622955, 4.8636184063422467, 9.0202076692799338], [1.2751617137982625,
-0.6513346750781821, -0.16386345868694718, 3.1716316758422023], [2.9023773053185637, 8.9692637270844582,
1.6172735994404981, 7.4214976557621863]], [[6.2236924981388704, 7.905721449676113, 8.7443185801483647,
7.8322696003898429], [1.4594763807072737, 1.4816816391035332, 1.0723365521876786, 2.0483788144323354],
[2.6919828599653823, 6.0931377871198222, 6.3871506536032889, 4.8960204818356816]]], [[[1.3374213030339792,
1.0196719260143312, 7.8135551684000291, 7.6901961570881152], [2.5498656660043291, 5.4102253705382024,
1.3642150243887938, 2.2012323981172162], [1.7822381015607851, 7.8733506320391555, 2.7769038851650003,
2.4510748803165354]], [[3.0917990262745128, -0.19108752127587803, 5.0980586876367955, 5.5667038615032673],
[2.5918816765369774, 0.72704614627348185, 8.4306025085988736, 2.40438278090891], [-0.089469812153119221,
6.3576826842918805, 5.3651773270445666, 0.87071547330471333]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([3.8454947431609945, 3.4801848055393254]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.181985677208)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([4.0274804203691783, 3.6621704827475092]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([2.6719646801005306, 4.0262173014652003]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([3.7355891147806837, -3.0309968912239551])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([6.4075537948812142, 0.99522041024124519]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[2.209887477038702, 2.087043312051243, 3.7254247294014622,
-3.7510652436671732, 0.70343608099575317], [4.1654611738215745, 1.5418518980850271,
2.7730022594684423, 3.386030420596251, 1.2758288509710365], [2.2174938185138764,
-1.244837837360393, 2.2331288285078887, -1.1442348969501834, 1.9394801392868004],
[0.68612447219195705, 0.7127527031233436, -3.6346644102130776, 2.0671128943191714,
3.7445028703597156]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.82316401579)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[7.0330514928326018, 6.9102073278451428, 8.5485887451953619,
1.0720987721267266, 5.5266000967896529], [8.9886251896154743, 6.3650159138789268,
7.596166275262342, 8.2091944363901508, 6.0989928667649362], [7.0406578343077761,
3.5783261784335068, 7.0562928443017885, 3.6789291188437163, 6.7626441550807002],
[5.5092884879858568, 5.5359167189172434, 1.1884996055808221, 6.8902769101130712,
8.5676668861536154]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-3.62961836797558, 4.0323249470469893, -2.4833229912823516,
-0.0081902035785272886, -0.26448613257378906], [2.0867535529248489, 0.049446344294963751,
4.4906317789174501, 2.6121865600043499, 1.3687146632565392], [4.2509170325103511,
2.9845191554148567, -0.9329820582137387, -0.58236994049271118, -3.4448732067194388],
[-2.3231599587033402, 1.6550934434842866, -4.5990521452319584, -2.1470268566500152,
-3.9698084155531008]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[3.3234017918244003, 3.3386199217996175, -2.5928786077225316,
-4.1429140632213803, 0.42204291369978719], [3.4123580113357495, -3.9076190537235664,
1.8779298531672159, 0.98377543853039562, -4.9365820051249267], [4.5252395032935961,
-4.8193051910732096, 1.060979071451845, -3.2927325266544871, -3.3828356655691971],
[-4.6411804903406182, -0.42921544747540707, -2.4541073523344323, -0.70845691989162329,
-1.2357505826155588]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.3062165761511797, 7.3709448688466068, -5.0762015990048832,
-4.1511042667999076, 0.15755678112599814], [5.4991115642605983, -3.8581727094286027,
6.3685616320846661, 3.5959619985347455, -3.5678673418683875], [8.7761565358039473,
-1.834786035658353, 0.12799701323810631, -3.8751024671471983, -6.8277088722886354],
[-6.9643404490439584, 1.2258779960088795, -7.0531594975663907, -2.8554837765416385,
-5.2055589981686596]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-2.0819775543023136, 4.4438294149957258], [1.203494127071604,
1.3934659764012478]], [[-1.7207192546012995, 1.128687542370864], [1.013953229943537,
2.0535582502969056]], [[-1.8482126685735398, 0.64499519705235819],
[-4.1200947648310313, 3.8041018736261574]], [[-0.12876390427677542, -0.26859118353213773],
[-2.8945993824974847, -3.3476923883525944]], [[3.1332107854705562, -4.6334666373330595],
[3.0499420638074994, -2.7959034777693104]], [[4.726734207260332, -1.3724501610660034],
[3.3499737674080023, -2.515294322458935]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.860178486532)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.2217990677700952, 5.3040079015279442], [2.0636726136038224,
2.2536444629334662]], [[-0.86054076806908109, 1.9888660289030824], [1.8741317164757554,
2.913736736829124]], [[-0.98803418204132143, 1.5051736835845766], [-3.2599162782988129,
4.6642803601583758]], [[0.73141458225544298, 0.59158730300008067], [-2.0344208959652663,
-2.487513901820376]], [[3.9933892720027746, -3.7732881508008411], [3.9101205503397178,
-1.935724991237092]], [[5.5869126937925504, -0.51227167453378497], [4.2101522539402207,
-1.6551158359267166]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-1.849788129717993, 0.64693319038907493], [3.0379670344950327,
0.80277076526299229]], [[2.4995340022105639, -4.3955703049125949], [0.58331276679079203,
0.044119077451267863]], [[2.2979922792046947, 1.6054844683234073], [0.50524258350986084,
-3.5539312710422779]], [[-1.1980433912188793, -2.6450000406046001], [-2.4128326188310121,
0.80678465051263526]], [[-2.9963692865064209, -1.0152803020104519], [-0.21931259441936035,
-1.153119362615751]], [[-4.2927186206837717, 0.4561872009236847], [3.0860876046130041,
-0.78568544768378068]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[-3.4985389035935222, 1.8888458641158987], [-4.2891085749380489,
2.8296217607019845]], [[-0.8200921678141917, 4.4359194831012676],
[-4.6185751325042244, 0.16520675598470014]], [[-2.801157092531934, 3.6231020804204928],
[1.5439760747845899, 2.0378140868272894]], [[0.99864930993784018, 3.369884315459073],
[4.399815205976239, -4.9546136700941936]], [[1.6240932313892289, -3.4517363344048615],
[2.8668483027947236, 1.1624090061600336]], [[2.6364367974081624, 2.628371373764919],
[-2.5877409052653833, -1.29236451403668]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-5.3483270333115147, 2.5357790545049737], [-1.2511415404430162,
3.6323925259649767]], [[1.6794418343963722, 0.040349178188672674],
[-4.0352623657134323, 0.209325833435968]], [[-0.50316481332723928, 5.2285865487439001],
[2.0492186582944507, -1.5161171842149885]], [[-0.19939408128103908, 0.72488427485447282],
[1.9869825871452269, -4.1478290195815584]], [[-1.372276055117192, -4.4670166364153134],
[2.6475357083753632, 0.0092896435442826331]], [[-1.6562818232756094,
3.0845585746886037], [0.49834669934762088, -2.0780499617204606]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[-0.026017904532606551, -0.80192450547405958,
0.93785799257835656, -4.4900007911078319], [-1.8444162073720949,
1.2059856695600812, 1.8326324480310756, 3.3745782356451564],
[3.0929324433706693, -0.94197156488767142, -2.3469684397851207,
-4.8976052662192613]], [[1.2658444546015346, 3.0389250549456399,
-2.567254770133963, 3.7513728753285314], [-0.10225306211433605,
-0.34121316520335299, -2.8745573331597321, -0.73976781968982142],
[4.6114590072566681, 3.5325642767850063, 2.1587079910040661,
3.8644723652636905]]], [[[-2.5953113243103623, 0.6437882672443429,
4.5677362343759853, 3.4108524985046262], [2.9904338528780352,
0.73113299006492127, 2.4253724263400445, 3.8646536702562031],
[-1.2545053686514152, -4.2675706218911706, -3.6576679389702105,
-0.29502287354943402]], [[0.9550527228483654, 2.9537233833481267,
-2.6904009310953283, 1.5998857010519698], [-3.7171702199982004,
-1.1578306702024044, 1.764070139728485, -1.1506068782808967],
[1.5727320181060982, 0.18468074769418674, 3.3262967055395372,
-1.2208265816075849]]], [[[-0.25003967903418278, -2.603663543909648,
4.6824047463125531, 1.0968919539473987], [1.3471700099604398,
-3.8321880437450218, -4.2809409903460676, 1.2933005361204906],
[-2.857251250328674, 3.6768205829450178, -2.7999953058490643,
2.1117422072666692]], [[-2.1994223710236427, 3.7669030216280923,
-3.5232105054852991, -3.7071480752824462], [-0.35952695279389246,
2.5451704526750873, -4.2842310996736144, -1.3813503044378783],
[-2.5647173415905145, 4.7437501634141572, -4.2234318870342245,
2.1862042652792866]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.33323555487)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.30721765033724147, -0.46868895060421156,
1.2710935474482046, -4.1567652362379839], [-1.5111806525022469,
1.5392212244299293, 2.1658680029009236, 3.7078137905150044],
[3.4261679982405173, -0.6087360100178234, -2.0137328849152727,
-4.5643697113494133]], [[1.5990800094713826, 3.3721606098154879,
-2.234019215264115, 4.0846084301983794], [0.23098249275551197,
-0.0079776103335049697, -2.541321778289884, -0.4065322648199734],
[4.9446945621265161, 3.8657998316548543, 2.4919435458739141,
4.1977079201335386]]], [[[-2.2620757694405143, 0.97702382211419092,
4.9009717892458333, 3.7440880533744743], [3.3236694077478832,
1.0643685449347693, 2.7586079812098925, 4.1978892251260511],
[-0.92126981378156714, -3.9343350670213226, -3.3244323841003625,
0.038212681320413999]], [[1.2882882777182134, 3.2869589382179747,
-2.3571653762254803, 1.9331212559218178], [-3.3839346651283524,
-0.82459511533255636, 2.097305694598333, -0.81737132341104868],
[1.9059675729759462, 0.51791630256403476, 3.6595322604093852,
-0.88759102673773693]]], [[[0.083195875835665234, -2.2704279890398,
5.0156403011824011, 1.4301275088172467], [1.6804055648302878,
-3.4989524888751737, -3.9477054354762195, 1.6265360909903386],
[-2.524015695458826, 4.0100561378148658, -2.4667597509792163,
2.4449777621365172]], [[-1.8661868161537947, 4.1001385764979403,
-3.1899749506154511, -3.3739125204125981], [-0.026291397924044446,
2.8784060075449354, -3.9509955448037664, -1.0481147495680303],
[-2.2314817867206664, 5.0769857182840052, -3.8901963321643764,
2.5194398201491346]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[1.6204760394819004, -0.95393695229398112, -1.221681223499369, 2.6618713903411937],
[-1.5387523541807724, 4.6220978399651482, -2.1795716817360713, -3.776821154104939], [1.4330066566763016,
3.7880327985429378, -0.65902727001966976, -4.29506128665055]], [[-4.0199255222547103, -3.644811287300751,
3.6508998060332054, -3.569704984460552], [-3.8429890733645489, -2.9119635791576437, 2.3183698092323652,
1.3643661323778851], [2.9328022056563725, -0.080129403375118535, 0.15566128013433289, 2.344258136058456]]],
[[[3.03272210358924, 2.8841814084596393, -4.059068204445289, -0.091640986980607408], [-4.2591024547151859,
-0.36305436045316863, 0.19284537915686428, 4.5041324479849649], [1.2988816365062537, -1.6778808169453416,
-3.5496975707176146, 4.314356820196215]], [[-1.4533462849506518, -1.003910808707118, 3.8948057966291092,
1.266066103629278], [-4.4119138102620346, -2.1246183047037603, -2.4610566322999161, -3.5862383252945271],
[2.9290698526446066, -0.26093763373887136, 0.87809331627623344, -0.47993365832407076]]], [[[2.1717793325666745,
0.83592896851733212, -2.2538107669063279, 1.6303402530881517], [-0.53207705017646578, -4.5214994998308979,
-3.6999121226789988, 3.5355643886671686], [3.3936340080223193, -2.1140030580705247, 1.821327452830638,
-1.6123768640462668]], [[2.3105165926895497, -3.0414367260786292, -1.5788704194425076, 1.0377969965556915],
[1.3575822980511116, 4.3465002873169833, 0.55678010189701688, 4.99079375906609], [4.2819911907361128,
4.9615031124625322, 2.7964852390480104, 0.029646894001982282]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[3.779495003239937, -4.7840877608643506, 2.651273004571375, -2.179381582597685],
[-0.27370078331190673, -3.6151069379138887, -0.6880481455894909, 4.4373993248198644], [-1.6276288613086387,
-1.6376839670015721, -3.1138607609774835, -2.7809800576738719]], [[0.85446276622548556, -4.3676040003341114,
-4.0083595770538496, -3.915065868011578], [1.6989039436984452, 3.5347026474299419, -1.8748410832866327,
-4.6526613314583045], [1.9480513434936046, 4.7386182205273322, -1.2001630607496541, 1.8094726084650006]]],
[[[4.9996435011863589, 0.60285036470010045, 1.457536438507919, 2.7443970579013879], [4.131864622110669,
0.20996245110639133, 3.3652305004680549, 3.1437873739212119], [-3.0818670302029405, -2.461603163946088,
-0.56609916674720218, -4.1186964404844861]], [[-2.7183232427482262, -2.1509712746053999, -2.281087666097271,
-2.4094567126275344], [-3.4723848022755091, -1.563218902128277, -4.7598832341275878, 1.8751725484288029],
[-4.0474621098792882, 0.59894943914858167, 1.0736279895120182, 4.5015525072725033]]], [[[-3.0082200796749703,
0.23283074563588535, 2.5230303985659734, 4.8262414779000231], [3.3772486493634837, 1.8234317033464915,
-1.7905158376185746, -2.9990918311449244], [-3.6765085717620041, 2.0057610304617572, -2.1487273241068525,
-4.1965541804451352]], [[0.26210933249566715, -2.9167787158271663, -0.89589477578380539, -0.41427249402553912],
[-3.1708181836677332, 4.3890602408555726, -1.1754542095914857, 4.8422639037274919], [-3.0044937138520034,
-4.1626528668210083, 0.20385989364778467, -0.016309737359709864]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[5.3999710427218375, -5.7380247131583317, 1.429591781072006, 0.48248980774350869],
[-1.8124531374926791, 1.0069909020512595, -2.8676198273255622, 0.66057817071492542], [-0.19462220463233715,
2.1503488315413657, -3.7728880309971533, -7.0760413443244214]], [[-3.1654627560292248, -8.0124152876348624,
-0.35745977102064419, -7.4847708524721295], [-2.1440851296661037, 0.62273906827229819, 0.44352872594573256,
-3.2882951990804195], [4.8808535491499772, 4.6584888171522136, -1.0445017806153212, 4.1537307445234566]]],
[[[8.0323656047755989, 3.4870317731597398, -2.60153176593737, 2.6527560709207805], [-0.12723783260451693,
-0.1530919093467773, 3.5580758796249192, 7.6479198219061768], [-1.7829853936966868, -4.1394839808914297,
-4.1157967374648168, 0.19566037971172889]], [[-4.171669527698878, -3.154882083312518, 1.6137181305318382,
-1.1433906089982564], [-7.8842986125375436, -3.6878372068320373, -7.2209398664275035, -1.7110657768657243],
[-1.1183922572346816, 0.33801180540971032, 1.9517213057882516, 4.0216188489484326]]], [[[-0.83644074710829575,
1.0687597141532175, 0.26921963165964558, 6.4565817309881748], [2.8451715991870179, -2.6980677964844064,
-5.4904279602975734, 0.53647255752224421], [-0.28287456373968478, -0.10824202760876744, -0.3273998712762145,
-5.808931044491402]], [[2.5726259251852168, -5.9582154419057956, -2.474765195226313, 0.62352450253015235],
[-1.8132358856166215, 8.7355605281725559, -0.61867410769446884, 9.833057662793582], [1.2774974768841094,
0.79885024564152385, 3.0003451326957951, 0.013337156642272419]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
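# ----------------------------------------------------------------------
# The tests that follow exercise the overloaded "+" for tagged Data:
# a tagged Data object carries a default value plus per-tag values set
# via setTaggedValue, and addition with a Symbol must yield a Symbol
# whose substitution reproduces the sum for the default value and for
# every tagged value alike.  The helper below is a hedged sketch of
# that pattern with made-up values; it is not part of the generated
# suite, and the leading underscore keeps unittest from collecting it.
def _sketch_add_taggedData_pattern(self):
    arg0=Data(1.0,self.functionspace)        # default value
    arg0.setTaggedValue(1,-2.0)              # value carried by tag 1
    arg1=Symbol(shape=())
    res=arg0+arg1                            # symbolic sum
    sub=res.substitute({arg1:numpy.array(0.5)})
    ref=Data(1.5,self.functionspace)         # 1.0+0.5
    ref.setTaggedValue(1,-1.5)               # -2.0+0.5
    self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
    self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++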
def test_add_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(3.50668349593,self.functionspace)
arg0.setTaggedValue(1,-3.09146650776)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(-4.32369560802)
sub=res.substitute({arg1:s1})
ref=Data(-0.81701211209,self.functionspace)
ref.setTaggedValue(1,-7.41516211578)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
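# Note on shapes: when a rank-0 Data is added to a higher-rank Symbol
# (or vice versa), the scalar is broadcast across every component, so
# the resulting Symbol takes the shape of the higher-rank operand; this
# is what the getShape() assertions in the mixed-rank tests below check.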
def test_add_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(3.83444600418,self.functionspace)
arg0.setTaggedValue(1,-0.266863397142)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([3.6938635924807581, -2.3199399928130826])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([7.5283095966592981, 1.5145060113654574]),self.functionspace)
ref.setTaggedValue(1,numpy.array([3.4270001953384694, -2.5868033899553713]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(-2.85642807584,self.functionspace)
arg0.setTaggedValue(1,-0.357260114938)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[4.4124412590911621, 1.732298167196193, 1.8228166076040306, -3.9853565905277355,
3.3793508288079881], [-1.5339512663354116, -2.8915144317379058, -3.6493591659102464, 1.4243106283527815,
-0.6931246781623841], [4.7714119110273394, 0.45700055229079606, 1.2539528503924027, -1.4029360809413403,
2.8915917074007416], [4.2546657221847255, 3.2639891865967527, -0.4712967898993945, -3.9077971138749112,
-3.5655383189938084]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.5560131832472779, -1.1241299086476912, -1.0336114682398536, -6.8417846663716197,
0.52292275296410384], [-4.3903793421792958, -5.74794250758179, -6.5057872417541311, -1.4321174474911027,
-3.5495527540062684], [1.9149838351834552, -2.3994275235530882, -1.6024752254514816, -4.2593641567852245,
0.035163631556857311], [1.3982376463408412, 0.40756111075286849, -3.3277248657432787, -6.7642251897187951,
-6.4219663948376926]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[4.0551811441529519, 1.3750380522579828, 1.4655564926658204,
-4.3426167054659457, 3.0220907138697779], [-1.8912113812736218, -3.248774546676116, -4.0066192808484562,
1.0670505134145714, -1.0503847931005943], [4.4141517960891292, 0.099740437352585865, 0.89669273545419248,
-1.7601961958795505, 2.5343315924625314], [3.8974056072465153, 2.9067290716585426, -0.82855690483760469,
-4.2650572288131219, -3.9227984339320185]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(-2.98759917871,self.functionspace)
arg0.setTaggedValue(1,-4.26584239637)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[0.65736935684204045, 1.4685807994312459], [0.99740155640158257, -2.8001282911414127]],
[[-0.80947613326718226, -4.0270117786915378], [1.1564198209626229, -4.917538904347448]], [[-1.0488230155998202,
4.0958534641909754], [-4.9502522108275002, -0.19486641488505008]], [[-4.507307254914509, -0.98539101308887389],
[-4.5909807035957675, 2.4265853650826985]], [[-4.252924691613126, 0.42394291278212481], [3.4198717705842103,
-4.6000003047031024]], [[4.9609535782609235, 3.1625779529060711], [0.26834958946896492, 3.0941570460788874]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-2.3302298218695272, -1.5190183792803218], [-1.9901976223099851, -5.7877274698529799]],
[[-3.7970753119787499, -7.0146109574031055], [-1.8311793577489448, -7.9051380830590157]], [[-4.0364221943113883,
1.1082542854794077], [-7.9378513895390679, -3.1824655935966177]], [[-7.4949064336260767, -3.9729901918004416],
[-7.5785798823073351, -0.56101381362886915]], [[-7.2405238703246937, -2.5636562659294428], [0.43227259187264266,
-7.5875994834146701]], [[1.9733543995493559, 0.17497877419450347], [-2.7192495892426027,
0.10655786736731976]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-3.6084730395261495, -2.7972615969369441], [-3.2684408399666074,
-7.0659706875096031]], [[-5.0753185296353722, -8.2928541750597269], [-3.1094225754055671, -9.183381300715638]],
[[-5.3146654119680097, -0.16998893217721456], [-9.2160946071956893, -4.46070881125324]], [[-8.773149651282699,
-5.2512334094570638], [-8.8568230999639574, -1.8392570312854915]], [[-8.5187670879813169, -3.8418994835860651],
[-0.84597062578397964, -8.8658427010712924]], [[0.69511118189273358, -1.1032644434621188], [-3.997492806899225,
-1.1716853502893025]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(-3.36894529378,self.functionspace)
arg0.setTaggedValue(1,-4.62956527999)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[-4.6824549992604805, 0.17860523484039881, -3.9939994980255102, -0.36579022311332743],
[-2.0003582573358858, 3.3436256968249793, -1.5671485178714373, 3.9554829351801821], [4.0499415739210693,
-3.1796189569360358, 0.28181611699077536, 1.4851321313182684]], [[4.9608073066477267, 2.1353944107091136,
3.2103965744924743, 0.36273874746876089], [0.33193515801312934, -1.8768462949087295, -3.5968753845201462,
-1.9342255010038101], [-0.98845968068423407, -2.6505467151645048, -3.9269883741621214, -1.2671783073823359]]],
[[[4.0296290320262234, 0.094183089334959114, -1.6548527114390654, 1.1815006848827636], [4.4205350333429578,
1.0602877007979998, -2.7207610093848364, 2.5749353581909009], [2.368743673752042, 0.36879117257479166,
3.1294699111463196, 3.8766421343643209]], [[-4.2994052301352443, -4.4665347726615128, -4.9654257982784813,
1.4010627781386145], [-0.49010647980719568, 1.1149343027340697, 3.8533389980231654, -1.4762647122950145],
[-2.4078638813490985, 4.4431147205208923, 3.0392301612263246, -2.3032611338556377]]], [[[1.1388924488325571,
4.4978561941078308, -3.3123851704811691, 1.3453478111463726], [4.1779635175178385, 3.1786527767023234,
-2.8109803623964669, 4.7217176158252876], [0.26914741902392958, -1.6630169842885789, -3.6267544687045641,
-4.7016327677304943]], [[0.44478691577550755, 2.9451130426961889, -1.0836274217802466, -4.8754431681482586],
[1.6457024072282014, -1.106310648992209, -3.2732924796145912, 4.7940609535301668], [-4.2482158844391957,
2.2391243759174451, 4.6408645091714327, 4.1449515947243611]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-8.0514002930449351, -3.1903400589440558, -7.3629447918099649, -3.734735516897782],
[-5.3693035511203409, -0.025319596959475277, -4.9360938116558923, 0.5865376413957275], [0.68099628013661473,
-6.5485642507204904, -3.0871291767936793, -1.8838131624661862]], [[1.5918620128632721, -1.233550883075341,
-0.15854871929198033, -3.0062065463156937], [-3.0370101357713253, -5.2457915886931836, -6.9658206783046008,
-5.3031707947882651], [-4.3574049744686887, -6.0194920089489594, -7.2959336679465761, -4.6361236011667906]]],
[[[0.66068373824176874, -3.2747622044494955, -5.0237980052235205, -2.187444608901691], [1.0515897395585032,
-2.3086575929864548, -6.0897063031692911, -0.79400993559355371], [-1.0002016200324126, -3.000154121209663,
-0.23947538263813506, 0.5076968405798663]], [[-7.668350523919699, -7.8354800664459674, -8.3343710920629359,
-1.9678825156458402], [-3.8590517735916503, -2.2540109910503849, 0.48439370423871075, -4.8452100060794692],
[-5.7768091751335531, 1.0741694267364377, -0.32971513255813001, -5.6722064276400923]]], [[[-2.2300528449518975,
1.1289109003233762, -6.6813304642656242, -2.023597482638082], [0.80901822373338383, -0.19029251708213124,
-6.1799256561809219, 1.352772322040833], [-3.099797874760525, -5.0319622780730331, -6.9956997624890187,
-8.0705780615149489]], [[-2.9241583780089471, -0.42383225108826572, -4.4525727155647008, -8.2443884619327132],
[-1.7232428865562532, -4.4752559427766636, -6.6422377733990459, 1.4251156597457122], [-7.6171611782236504,
-1.1298209178670096, 1.2719192153869781, 0.77600630093990652]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-9.3120202792456048, -4.4509600451447255, -8.6235647780106355,
-4.9953555030984518], [-6.6299235373210106, -1.285939583160145, -6.1967137978565621, -0.67408234480494222],
[-0.579623706064055, -7.8091842369211601, -4.347749162994349, -3.144433148666856]], [[0.33124202666260238,
-2.4941708692760107, -1.4191687054926501, -4.2668265325163635], [-4.297630121971995, -6.5064115748938534,
-8.2264406645052706, -6.5637907809889349], [-5.6180249606693584, -7.2801119951496291, -8.5565536541472458,
-5.8967435873674603]]], [[[-0.59993624795890099, -4.5353821906501652, -6.2844179914241902, -3.4480645951023607],
[-0.20903024664216652, -3.5692775791871245, -7.3503262893699608, -2.0546299217942234], [-2.2608216062330824,
-4.2607741074103327, -1.5000953688388048, -0.75292314562080342]], [[-8.9289705101203687, -9.0961000526466371,
-9.5949910782636056, -3.2285025018465099], [-5.11967175979232, -3.5146309772510547, -0.77622628196195897,
-6.1058299922801389], [-7.0374291613342228, -0.18645055946423206, -1.5903351187587997, -6.932826413840762]]],
[[[-3.4906728311525672, -0.13170908587729357, -7.9419504504662939, -3.2842174688387518], [-0.45160176246728589,
-1.450912503282801, -7.4405456423815917, 0.092152335840163246], [-4.3604178609611948, -6.2925822642737028,
-8.2563197486896875, -9.3311980477156187]], [[-4.1847783642096168, -1.6844522372889355, -5.7131927017653705,
-9.505008448133383], [-2.983862872756923, -5.7358759289773333, -7.9028577595997156, 0.16449567354504246],
[-8.8777811644243201, -2.3904409040676793, 0.011299229186308324, -0.48461368526076321]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([-4.9434811071655114, 1.7588416724781917]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([3.0524482361043965, -0.58828792238396233]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(-4.86003727467)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-9.8035183818403411, -3.1011956021966389]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-1.8075890385704341, -5.4483251970587929]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([0.47124983588436109, 3.3842142103059487]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([4.4506172428158504, -1.5976912605342894]))
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([2.7380372395241483, -1.2414970456241372])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([3.2092870754085094, 2.1427171646818115]),self.functionspace)
ref.setTaggedValue(1,numpy.array([7.1886544823399987, -2.8391883061584267]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[3.7123556177495072, -1.2322724929891438, -4.3196981967098704, 4.5149190397092358,
-3.4294461596271342], [-0.32526237821140569, 4.906418518064358, 1.6782843293160443, -4.5452294423093242,
-3.4252951962126454], [4.7623389482797158, 4.8957853100883888, 2.4605965522735644, -3.3235939770772349,
-3.6622677868193731], [3.7849671492059009, -3.7965523255405484, -0.98706292680421903, -2.9575953641431996,
3.7235194699440495]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[3.846235478086534, -2.9152984736534773, 2.1299170235868692,
1.4194093106373815, -1.9728564928751369], [0.12730504885223404, -2.4537968289763077, 1.8352652361138375,
-1.1054616749639532, -0.67553225283567997], [-4.6542627767136047, 0.014905560429250286, 0.84138572626791408,
-1.4074784720342515, -3.3322631066777983], [-0.64893500421415951, 4.4524265176475826, -3.5204114624144456,
3.5239615703390363, 2.3718443568961201]]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(3.4845259086)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[7.1968815263516515, 2.2522534156130005, -0.83517228810772615, 7.9994449483113801,
0.055079748975010112], [3.1592635303907386, 8.3909444266665023, 5.1628102379181886, -1.06070353370718,
0.059230712389498841], [8.2468648568818601, 8.3803112186905331, 5.9451224608757087, 0.16093193152490937,
-0.17774187821722887], [7.2694930578080452, -0.31202641693840416, 2.4974629817979253, 0.52693054445894472,
7.2080453785461938]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[7.3307613866886783, 0.56922743494866701, 5.6144429321890135,
4.9039352192395258, 1.5116694157270074], [3.6118309574543783, 1.0307290796258366, 5.3197911447159818,
2.3790642336381911, 2.8089936557664643], [-1.1697368681114604, 3.4994314690313946, 4.3259116348700584,
2.0770474365678928, 0.15226280192434594], [2.8355909043879848, 7.9369524262497269, -0.035885553812301296,
7.0084874789411806, 5.8563702654982643]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[2.7675952117994296, 0.98431175880226363, -1.8309000840442566, 2.0351166910383416,
2.1718600084175153], [0.64718493825654111, 3.0274641310077364, 4.6031246235215555, -0.072830522019846633,
-3.436466903373192], [-2.7989895712459734, 3.2804563231391093, 3.1416998470123456, 0.25702028842752966,
-3.1553411419958821], [-4.5620989116806543, -0.23300222673645532, -2.3978689464069101, 0.41391436589174457,
-3.7252639362836382]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[-2.1509506437818238, -2.5007519800405218, 0.30616207266744233,
-0.46790716227581797, 0.6454558125610621], [1.9589653025955753, -4.9059174981425437, -4.7107956989445992,
2.6150016745692826, -3.3329567586885211], [1.1850451086308738, 3.8781029980110997, -4.7104324292639133,
-4.8362413881812492, 4.9066980390674555], [-1.2440311634968171, -1.6429522113717008, 4.0547225056117124,
-0.33314796054153195, -2.6143781039708855]]))
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[-0.0104190624259477, 3.439083370835446, -1.7585221913131677, 3.8784501968475897,
0.08088556648108991], [0.53276272310770789, -1.3171951284400176, -0.841014288686317, 2.4350359443944622,
0.55796159262639922], [-3.3985580423616479, 0.73804937880111687, 0.84641655693241269, -2.0376479444757822,
-0.094456394031885438], [0.8829252865168975, 0.84170422580042903, -1.9539396350167637, -4.8054718599517194,
-0.37594711864698205]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[2.7571761493734819, 4.4233951296377096, -3.5894222753574243, 5.9135668878859313,
2.2527455748986052], [1.179947661364249, 1.7102690025677187, 3.7621103348352385, 2.3622054223746156,
-2.8785053107467928], [-6.1975476136076217, 4.0185057019402262, 3.9881164039447583, -1.7806276560482526,
-3.2497975360277676], [-3.6791736251637568, 0.60870199906397371, -4.3518085814236738, -4.3915574940599749,
-4.1012110549306202]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-2.1613697062077715, 0.93833139079492422, -1.4523601186457253,
3.4105430345717718, 0.72634137904215201], [2.4917280257032832, -6.2231126265825614, -5.5518099876309162,
5.0500376189637448, -2.7749951660621219], [-2.2135129337307742, 4.6161523768122166, -3.8640158723315006,
-6.8738893326570309, 4.8122416450355701], [-0.36110587697991958, -0.80124798557127175, 2.1007828705949487,
-5.1386198204932514, -2.9903252226178676]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[1.6094791339338048, 4.27222307751477], [4.9486531857239697, -4.5552975586923292]],
[[-0.12032729123703056, -4.1413061177629231], [-2.7473350985925316, 4.7319188820310991]], [[0.13107637034429231,
-3.2138415379490204], [-3.9942457581718696, 1.3262496008026838]], [[2.56850905863657, 1.8321753808437329],
[4.5176482730823331, 4.4664637318837137]], [[0.50860355331966556, 0.55279434819439199], [3.1688695988617859,
-2.6740526298455016]], [[4.4977965557520072, 3.6422271944652209], [3.7948343945899445,
-3.0377990068633332]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[-2.9548694146760557, 3.1101651017467038], [-0.31006440672923752,
0.74616091042484989]], [[-3.1016477433464864, 2.9532816390640111], [-2.0494474684559894, -1.1448583599993354]],
[[4.2052724347365604, -1.8157003708847643], [4.8073133555422327, -2.7045312989764492]], [[-2.3803833325202763,
0.19928505008920272], [-2.8622812030202094, 3.9488692362256081]], [[-4.1266217915470236, 4.8461083576413735],
[-3.1895474177762351, 4.4625154514412237]], [[-0.65350755924337811, 2.8015786665738105], [0.94103003425367859,
0.27556367440023166]]]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.49324308458)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[6.1027222185118468, 8.765466162092812], [9.4418962703020117, -0.062054474114287217]],
[[4.3729157933410114, 0.35193696681511888], [1.7459079859855104, 9.2251619666091411]], [[4.6243194549223343,
1.2794015466290216], [0.49899732640617245, 5.8194926853807258]], [[7.061752143214612, 6.3254184654217749],
[9.0108913576603751, 8.9597068164617557]], [[5.0018466378977076, 5.046037432772434], [7.6621126834398279,
1.8191904547325404]], [[8.9910396403300492, 8.1354702790432629], [8.2880774791679865,
1.4554440777147089]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.5383736699019863, 7.6034081863247458], [4.1831786778488045,
5.2394039950028919]], [[1.3915953412315556, 7.4465247236420531], [2.4437956161220526, 3.3483847245787066]],
[[8.6985155193146024, 2.6775427136932777], [9.3005564401202747, 1.7887117856015928]], [[2.1128597520577657,
4.6925281346672447], [1.6309618815578326, 8.4421123208036501]], [[0.36662129303101842, 9.3393514422194155],
[1.3036956668018069, 8.9557585360192657]], [[3.8397355253346639, 7.2948217511518525], [5.4342731188317206,
4.7688067589782737]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-2.7345315461324993, 4.5316724428402377], [-1.2207000383039999, -2.1651454481686692]],
[[-2.5222456135735638, 3.1325113872519896], [0.54140311786327011, -1.6266115642059011]], [[4.3999274072752783,
-0.64510581732829841], [-3.3878893926233533, -0.14783111107246061]], [[2.4816188811184228, 1.505965932327137],
[-2.8128544405052458, 3.2460332510852936]], [[1.5649806120186849, 1.1768584297160487], [-3.3133262672401544,
-2.5740884272652789]], [[2.936076596237732, -0.80694051724477056], [1.6382059835800931,
-0.059174653042079584]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[4.107948776768561, 4.79459166600315], [-0.070211802843057391,
-2.3000592273671394]], [[1.53142006950028, 0.5983353676488381], [4.2000369856633419, -3.7326077043834074]],
[[-3.6852528003303684, -0.40061815593309014], [4.849947657932514, 3.2046322763443698]], [[4.6824735127774275,
-2.3356975272114679], [-1.4284737023138216, -0.96863966970867921]], [[4.4306883649430571, 0.16250464015770305],
[4.7866411719098583, -1.6949698779239197]], [[-4.9624929004021014, -0.4120760567738655], [-3.510925072784119,
-0.26388846668772636]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[3.7560333190798687, 0.63030183757017788], [-3.8821224320935288, 4.3508142113739634]],
[[4.3548667192676795, -3.4709315123037445], [-0.19540447292770935, -1.1720138856956916]], [[3.7993994701980398,
-4.5475458462287497], [-0.20650310401114513, -2.7802894344079201]], [[-0.46867874332271242, 0.82685022383334505],
[-3.5357776147305264, 0.7633420403065605]], [[-0.19578164461526359, -4.1370261640670458], [-1.2073883253186946,
0.74664652191646397]], [[-0.697880661399644, -0.46932885527321488], [2.4087818009804716, -1.8245102799854829]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[1.0215017729473694, 5.1619742804104156], [-5.1028224703975287, 2.1856687632052942]],
[[1.8326211056941157, -0.33842012505175489], [0.34599864493556076, -2.7986254499015928]], [[8.1993268774733181,
-5.1926516635570481], [-3.5943924966344984, -2.9281205454803807]], [[2.0129401377957103, 2.3328161561604821],
[-6.3486320552357718, 4.0093752913918541]], [[1.3691989674034213, -2.9601677343509971], [-4.520714592558849,
-1.8274419053488149]], [[2.238195934838088, -1.2762693725179854], [4.0469877845605646,
-1.8836849330275625]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[7.8639820958484297, 5.4248935035733279], [-3.9523342349365862,
2.050754984006824]], [[5.8862867887679595, -2.8725961446549064], [4.0046325127356326, -4.904621590079099]],
[[0.11414666986767141, -4.9481640021618398], [4.6434445539213689, 0.42434284193644967]], [[4.2137947694547151,
-1.5088473033781229], [-4.9642513170443481, -0.20529762940211871]], [[4.2349067203277935, -3.9745215239093428],
[3.5792528465911637, -0.94832335600745576]], [[-5.6603735618017454, -0.88140491204708038], [-1.1021432718036475,
-2.0883987466732092]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[1.2403285479679145, -0.65355746314869823, 0.23507371305026048, 2.9495208917061202],
[-4.4153187452600653, -1.0271324152128747, 3.6087985228033794, 1.587633224392107], [1.5882989512534262,
-2.3766989521547401, -4.6462509853387939, 1.1425676014861166]], [[-4.8469447836806694, -1.4338245370809863,
-4.8441809139347694, 0.082128480181090424], [4.2695412477206585, -2.0376229192188622, -2.685821131586259,
-4.5654361329152717], [3.5226403567783482, -4.9633770210253347, 4.1637469549065127, -3.5898874968684167]]],
[[[2.7439089503129228, 0.81346375693975492, -2.576882111469688, 4.758878084101946], [0.098363354586225249,
-4.314913184354209, -1.1821682575010484, 4.9687115939178916], [-2.5414207769554564, 1.9836872846103208,
-1.5982744174212127, 4.5509211096426121]], [[4.759533396882766, -4.550347299113696, 4.9394743649799153,
-3.9692445921595421], [1.5755016838325195, 2.6599597206311305, -0.59545966103916648, -1.308464088815966],
[1.7018715016873482, 0.31781368103450536, -0.91184792887657995, -0.60566457689943931]]], [[[-0.365764084374395,
-0.75878286483821444, -3.1104661623240091, -3.7302303444372109], [0.58052395594970907, 0.14085590954626337,
4.6712439745076182, 0.65991412045590181], [-4.5675491076195733, -3.3042112830144132, -2.6719400309110553,
-3.8520603991598765]], [[3.4260488825099618, -1.2789319515430164, 1.8435112511824903, 1.0773214658952854],
[-4.0772283149901236, 1.0211433275718873, -2.015430043082814, 0.1376630245430368], [1.3249956905172624,
3.1987247807146968, 1.0304156332749459, 3.785256475561086]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-3.8774766185796605, 3.1521364883779448, -4.9233158714840091,
3.7988193665209522], [4.8244393256113263, 2.4688683468745563, -4.5044275072582254, 1.1107496985072052],
[-2.9980383766650376, -4.2922660982517158, 3.4924104659712771, -0.5135964311738892]], [[1.9573144047865201,
-2.2686101409008961, -2.907052414660404, -4.0582253229051144], [-2.0281877168409657, 1.7867206317317663,
0.018511114285918673, -4.0475974398672498], [1.3023403490307315, 1.9932255873687215, -4.6698465653310688,
-4.5630845029599421]]], [[[-1.9525649263627876, -0.72040110769848908, -3.6987029249472769, -3.3184217891099999],
[-4.0519149413902857, 4.1195877398536549, -3.8261874289376463, 3.423780007792768], [0.11768639970294359,
-1.4898880703788131, -1.1746648112150213, -0.28493737967147226]], [[-2.0138403307539932, 3.9987186392010816,
-1.0125535260055338, 0.57376641241565363], [4.213727608092972, 0.51388058678005066, -4.4106027756910908,
-1.9979423050108283], [1.5708368447511347, -1.6270284297780933, -0.55277364435139376, -1.7748804647831715]]],
[[[2.7639070541103061, 2.7303808332951629, 0.41148416591473591, -1.9337000414572802], [-2.7585163378482456,
2.2319457297797207, 3.7988668025967804, 3.6103374331669471], [-4.5925114196923271, -2.1274746711435997,
3.3094547630756779, -4.1386856959210352]], [[-2.1348423629137692, 3.539794593057783, 4.8265405725541157,
4.9426398297282788], [4.5757071915543417, -4.0433372993763399, -0.84096548582416997, 2.0567811910343226],
[4.5367596882428671, -4.9139510999364404, 1.1342166543217944, 1.4859311895053571]]]]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.83582066753)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[6.0761492154946453, 4.1822632043780326, 5.0708943805769913, 7.785341559232851],
[0.4205019222666655, 3.8086882523138561, 8.4446191903301102, 6.4234538919188378], [6.424119618780157,
2.4591217153719906, 0.18956968218793691, 5.9783882690128474]], [[-0.011124116153938601, 3.4019961304457444,
-0.008360246408038563, 4.9179491477078212], [9.1053619152473892, 2.7981977483078686, 2.1499995359404718,
0.27038453461145906], [8.358461024305079, -0.12755635349860395, 8.9995676224332435, 1.2459331706583141]]],
[[[7.5797296178396536, 5.6492844244664857, 2.2589385560570427, 9.5946987516286768], [4.934184022112956,
0.52090748317252178, 3.6536524100256824, 9.8045322614446224], [2.2943998905712744, 6.8195079521370516,
3.2375462501055181, 9.3867417771693429]], [[9.5953540644094968, 0.28547336841303483, 9.7752950325066461,
0.86657607536718873], [6.4113223513592503, 7.4957803881578613, 4.2403610064875643, 3.5273565787107648],
[6.537692169214079, 5.1536343485612361, 3.9239727386501508, 4.2301560906272915]]], [[[4.4700565831523358,
4.0770378026885163, 1.7253545052027217, 1.1055903230895199], [5.4163446234764399, 4.9766765770729942,
9.507064642034349, 5.4957347879826326], [0.26827155990715745, 1.5316093845123175, 2.1638806366156755,
0.98376026836685426]], [[8.2618695500366925, 3.5568887159837144, 6.679331918709221, 5.9131421334220162],
[0.75859235253660717, 5.8569639950986181, 2.8203906244439167, 4.9734836920697676], [6.1608163580439932,
8.0345454482414276, 5.8662363008016767, 8.6210771430878168]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.95834404894707026, 7.9879571559046756, -0.087495203957278278,
8.634640034047683], [9.6602599931380571, 7.304689014401287, 0.3313931602685054, 5.946570366033936],
[1.8377822908616932, 0.543554569275015, 8.3282311334980079, 4.3222242363528416]], [[6.7931350723132509,
2.5672105266258347, 1.9287682528663268, 0.77759534462161639], [2.8076329506857651, 6.6225412992584971,
4.8543317818126495, 0.78822322765948094], [6.1381610165574623, 6.8290462548954523, 0.165974102195662,
0.27273616456678873]]], [[[2.8832557411639432, 4.1154195598282417, 1.1371177425794539, 1.5173988784167309],
[0.78390572613644505, 8.9554084073803857, 1.0096332385890845, 8.2596006753194988], [4.9535070672296744,
3.3459325971479177, 3.6611558563117095, 4.5508832878552585]], [[2.8219803367727376, 8.8345393067278124,
3.823267141521197, 5.4095870799423844], [9.0495482756197028, 5.3497012543067815, 0.42521789183563996,
2.8378783625159025], [6.4066575122778655, 3.2087922377486375, 4.283047023175337, 3.0609402027435593]]],
[[[7.5997277216370369, 7.5662015008218937, 5.2473048334414667, 2.9021206260694505], [2.0773043296784852,
7.0677663973064515, 8.6346874701235112, 8.4461581006936779], [0.24330924783440366, 2.7083459963831311,
8.1452754306024087, 0.6971349716056956]], [[2.7009783046129616, 8.3756152605845138, 9.6623612400808465,
9.7784604972550095], [9.4115278590810725, 0.79248336815039089, 3.9948551817025608, 6.8926018585610533],
[9.3725803557695979, -0.078130432409709627, 5.9700373218485252, 6.3217518570320879]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[-3.1509236523814286, 1.680234058442708, -1.7187977550532416, 3.9846453843972913],
[-1.6754979614332322, -3.8450074807346901, -1.5740330789137689, -4.4201074343218751], [2.276529915966389,
-0.80235747833916982, 4.571247045598767, -3.4093255486695617]], [[-4.0166628667791446, -1.3933240066153738,
-1.215071574667598, -3.4706735067142258], [-3.0960303329082572, 4.3009033191704589, 4.4065883064621634,
4.8965445768019009], [-4.4443460968929758, 3.8975314333052253, -4.4153045047286144, 1.7496820405056166]]],
[[[1.634274247051799, -2.4623052709302771, 1.4279180811059975, 0.92544783745377668], [-4.4862942162658106,
-0.17080151547727951, 0.52532922395695625, -0.11419327223481623], [-1.1603038628614835, -2.5757515035829472,
1.9959550719114718, -1.7953240768392242]], [[4.9309159450812103, 3.2298165897638906, -0.075208625571880461,
-1.1899071115534432], [1.6545058865005409, -1.9426363189361773, 1.620629502101667, -4.2257681218133687],
[-0.24689686416986767, 2.1247379677905815, -0.022501917990521925, -1.9988138278359822]]], [[[-2.16170138942825,
1.2184335532362125, 1.1509535832826323, 2.2195238124001797], [2.7455643566460015, 4.6453581322389361,
-4.1082447076462643, -4.0639146315693067], [-4.96116105494092, -3.6915142795866762, -1.2186796693827917,
4.7933913234222967]], [[2.0022553772723217, -0.96891528014022654, -2.5457411370843142, -3.3574915783043058],
[0.10326637441549735, 2.2065594442944327, 3.4159550457557479, -0.71182719653128945], [-1.5473005591196651,
-1.8237704422942014, 3.7660184612895105, -2.1565964302540372]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-2.7644812297675436, 0.40931971763703956, 3.611075059192606,
0.50972765741910564], [-4.2130726841282584, -1.1190277433669751, -0.71203745760782766, -3.152956525368753],
[-1.6186056313723087, 1.1274726343098616, 4.4133392834898437, 1.5220424195160689]], [[0.16147933294385375,
2.4654462130650998, -2.2315133839410328, -4.5248215067907562], [2.2226933853289026, 3.7083490689582508,
1.6042940030913613, 0.26178935291219929], [2.4033332562872989, 2.6116613010273229, -3.5340848426974594,
-4.3871506552920767]]], [[[-2.5011422414749243, -2.9785737952530678, -4.0632268435384287, -2.9061747268645899],
[-3.4361922491984487, 0.92512310228203631, -3.7591410062368915, -0.10199113857196274], [1.4370716393838645,
0.71874746237537668, -4.5480615526025323, -3.9385610102938093]], [[-3.5039474073115562, 1.4740925776889409,
-0.06403798877318323, -3.3828440686373753], [-1.9590119108809123, -0.13446729158123816, -2.4360152863347251,
0.81375486060557112], [2.4638296949211451, 0.84554464160795018, 1.0770605717668191, 0.90311465710515648]]],
[[[-3.0365259446312756, -2.1113062138954444, 3.190598106141481, 4.7146234105400531], [4.7073713389281071,
2.0949812753843036, 1.902801485931489, -0.4384294077249864], [-4.4341512258710214, 4.114619941421422,
4.1663347911930675, -0.082374028629738305]], [[-0.58950965471106098, -1.9744112566224792, -0.0098348725084971278,
2.3871548847218813], [-1.1861224380121662, -3.8703032573387253, 0.2332725218101972, 2.7881117501797101],
[-4.3313677243610327, 2.5428749523942127, 3.9018944633638419, -0.49408732338659789]]]]))
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[1.8433628252117984, 1.5322432245117268, 0.55363793461945665, 4.6657626927783653],
[-0.94710403494804751, 3.9800168829397649, 3.0366988370600794, 2.8875431155604332], [-1.188024345098996,
1.0665386751463011, 4.7835901054797993, 2.5969696632689807]], [[-1.99850752062535, 1.1333681341555639,
-0.49718999089842697, 1.1440753369804515], [0.26294280812698378, -3.8684363170040701, 0.061030108864615684,
-4.1179127492349608], [-4.67031644465197, 4.9054510497550492, -0.2640662442281041, 1.363134852748785]]],
[[[-1.4621905107325697, -2.8811881835070574, -2.0127263016810106, 3.9187151372775499], [4.0559843147336121,
3.8748150284806506, -4.7195991819934049, 1.6441241199343715], [1.1018797372155733, 1.5720711461020827,
-2.8718182782954003, -2.4926472889456743]], [[2.1583981297206112, -2.7029142786449709, -4.0306810999276212,
-0.041927417439557857], [2.5297094316362001, 3.2023688131127575, -0.87830172094753056, 1.5087811969314782],
[0.94040146920827272, 1.8042467131134678, 2.6306472495122346, 0.16819275341523543]]], [[[0.15798239523545377,
2.4104584738150319, 2.3850248364278386, 3.2174938931658534], [4.8575582926065533, 0.30772922316230389,
-4.4397211951638047, 0.39063821497748741], [-2.3146321369181688, -3.0703095447217885, 1.7397877979741549,
4.033153568325778]], [[-1.7935270727714037, -3.9682025038313595, -3.4065483616803141, 2.1844510922893523],
[-4.2449404804537032, 1.9572337718531996, -4.6593011375931308, 0.98236210083608633], [4.8624542464851288,
0.5657266529616205, 0.50114562982511135, -3.2736237576584317]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-1.3075608271696302, 3.2124772829544348, -1.165159820433785, 8.6504080771756566],
[-2.6226019963812797, 0.13500940220507474, 1.4626657581463105, -1.5325643187614419], [1.088505570867393,
0.26418119680713126, 9.3548371510785664, -0.81235588540058101]], [[-6.0151703874044946, -0.25995587245980989,
-1.7122615655660249, -2.3265981697337743], [-2.8330875247812735, 0.43246700216638878, 4.4676184153267791,
0.77863182756694016], [-9.1146625415449449, 8.8029824830602745, -4.6793707489567185, 3.1128168932544016]]],
[[[0.17208373631922935, -5.3434934544373345, -0.58480822057501314, 4.8441629747313266], [-0.4303099015321985,
3.7040135130033711, -4.1942699580364486, 1.5299308476995552], [-0.058424125645910152, -1.0036803574808646,
-0.87586320638392845, -4.2879713657848981]], [[7.0893140748018215, 0.52690231111891972, -4.1058897254995017,
-1.2318345289930011], [4.184215318136741, 1.2597324941765802, 0.74232778115413645, -2.7169869248818905],
[0.69350460503840505, 3.9289846809040494, 2.6081453315217127, -1.8306210744207467]]], [[[-2.0037189941927962,
3.6288920270512444, 3.5359784197104709, 5.4370177055660331], [7.6031226492525548, 4.95308735540124,
-8.5479659028100698, -3.6732764165918192], [-7.2757931918590888, -6.7618238243084647, 0.52110812859136324,
8.8265448917480747]], [[0.20872830450091806, -4.9371177839715861, -5.9522894987646282, -1.1730404860149535],
[-4.1416741060382058, 4.1637932161476323, -1.2433460918373829, 0.27053490430479687], [3.3151536873654637,
-1.2580437893325809, 4.2671640911146218, -5.430220187912469]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.92111840455574523, 1.9415629421487663, 4.1647129938120626,
5.1754903501974709], [-5.1601767190763059, 2.8609891395727898, 2.3246613794522517, -0.26541340980831984],
[-2.8066299764713047, 2.1940113094561626, 9.1969293889696431, 4.1190120827850496]], [[-1.8370281876814962,
3.5988143472206637, -2.7287033748394598, -3.3807461698103047], [2.4856361934558864, -0.16008724804581931,
1.665324111955977, -3.8561233963227615], [-2.2669831883646712, 7.5171123507823721, -3.7981510869255635,
-3.0240158025432917]]], [[[-3.9633327522074939, -5.8597619787601252, -6.0759531452194393, 1.0125404104129601],
[0.61979206553516342, 4.7999381307626869, -8.4787401882302973, 1.5421329813624087], [2.5389513765994378,
2.2908186084774593, -7.4198798308979326, -6.4312082992394837]], [[-1.345549277590945, -1.22882170095603,
-4.0947190887008045, -3.4247714860769332], [0.57069752075528779, 3.0679015215315193, -3.3143170072822556,
2.3225360575370493], [3.4042311641294178, 2.649791354721418, 3.7077078212790537, 1.0713074105203919]]],
[[[-2.8785435493958218, 0.29915225991958749, 5.5756229425693196, 7.9321173037059065], [9.5649296315346604,
2.4027104985466075, -2.5369197092323157, -0.047791192747498989], [-6.7487833627891902, 1.0443103966996334,
5.9061225891672224, 3.9507795396960397]], [[-2.3830367274824646, -5.9426137604538383, -3.4163832341888112,
4.5716059770112336], [-5.4310629184658694, -1.9130694854855257, -4.4260286157829336, 3.7704738510157965],
[0.53108652212409613, 3.1086016053558332, 4.4030400931889533, -3.7677110810450296]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
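# ----------------------------------------------------------------------
# The tests that follow use expanded Data: arg0 is assembled piecewise
# from the mask whereNegative(x[0]-0.5), taking one value where the
# first coordinate lies below 0.5 and another elsewhere, while the
# reference is assembled with the mask 1.-whereZero(x[0]).  The helper
# below is a hedged sketch of that pattern with made-up values (not
# part of the generated suite); it reuses a single mask on both sides,
# which keeps the sketch correct on any function space.
def _sketch_add_expandedData_pattern(self):
    msk=whereNegative(self.functionspace.getX()[0]-0.5)
    arg0=msk*(2.0)+(1.-msk)*(-1.0)           # piecewise expanded Data
    arg1=Symbol(shape=())
    res=arg0+arg1
    sub=res.substitute({arg1:numpy.array(0.25)})
    ref=msk*(2.25)+(1.-msk)*(-0.75)          # 2.0+0.25 and -1.0+0.25
    self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
    self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++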
def test_add_overloaded_expandedData_rank0_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-0.481249850026)+(1.-msk_arg0)*(-1.48465416864)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(-2.65110429185)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*(-3.13235414188)+(1.-msk_ref)*(-4.13575846049)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(1.13411439983)+(1.-msk_arg0)*(-0.629637549331)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([-0.62992419613163175, 4.55886114005793])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([0.50419020369403444, 5.6929755398835962])+(1.-msk_ref)*numpy.array([-1.259561745462479,
3.9292235907270827])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(3.01809294358)+(1.-msk_arg0)*(0.889743657807)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[-2.793178683106079, -2.6222774715493582, 1.0142792223620747, -3.0640922264732984,
-2.3554298671206055], [0.088775964219395043, 3.4441381957619619, 3.3892189758872853, 2.7423767697866088,
3.977644321141641], [1.4526982641352157, 2.2184052986969505, -3.952710218879385, -4.7169576073736375,
-0.7937042808225101], [2.2686916098744314, -1.553248315886353, -2.7367045745859819, 3.7958840729585344,
1.4548199443717298]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[0.22491426047411567, 0.39581547203083645, 4.0323721659422693, -0.045999282893103732,
0.66266307645958911], [3.1068689077995897, 6.4622311393421565, 6.4073119194674799, 5.7604697133668035,
6.9957372647218357], [4.4707912077154104, 5.2364982422771451, -0.93461727529919036, -1.6988646637934428,
2.2243886627576845], [5.2867845534546261, 1.4648446276938416, 0.28138836899421271, 6.813977016538729,
4.4729128879519244]])+(1.-msk_ref)*numpy.array([[-1.9034350252987218, -1.732533813742001, 1.9040228801694319,
-2.1743485686659412, -1.4656862093132483], [0.97851962202675224, 4.3338818535693191, 4.2789626336946425,
3.632120427593966, 4.8673879789489982], [2.3424419219425729, 3.1081489565043077, -3.0629665610720278,
-3.8272139495662802, 0.096039376984847102], [3.1584352676817886, -0.66350465807899583, -1.8469609167786247,
4.6856277307658916, 2.344563602179087]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-4.98444562132)+(1.-msk_arg0)*(4.30756765987)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[1.9993822405268356, -3.1230808428690615], [4.9036400439562815, -4.8838867997176525]],
[[0.42763250705520939, 1.7579324334230453], [-3.7242679708963458, 1.8833596506298056]], [[-3.5481907533254931,
0.2040318933875751], [-2.5124574767604746, -4.1576503017979416]], [[2.4187154671810562, -0.51775884222858526],
[-1.722028671225063, 4.8177194310600537]], [[3.5460779618762999, 3.7426721831596925], [-3.14876579453641,
-1.8491069265603413]], [[-2.0602497125201733, 1.8445672729830882], [2.6289048953955998, -2.1171625740448654]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-2.9850633807947604, -8.1075264641906575], [-0.080805577365314463,
-9.8683324210392485]], [[-4.5568131142663866, -3.2265131878985507], [-8.7087135922179417, -3.1010859706917904]],
[[-8.5326363746470886, -4.7804137279340209], [-7.4969030980820701, -9.1420959231195376]], [[-2.5657301541405397,
-5.5022044635501812], [-6.7064742925466589, -0.16672619026154223]], [[-1.4383676594452961, -1.2417734381619034],
[-8.1332114158580069, -6.8335525478819372]], [[-7.0446953338417693, -3.1398783483385078], [-2.3555407259259962,
-7.1016081953664614]]])+(1.-msk_ref)*numpy.array([[[6.3069499004015404, 1.1844868170056433], [9.2112077038309863,
-0.57631913984294769]], [[4.7352001669299142, 6.0655000932977501], [0.58329968897835904, 6.1909273105045104]],
[[0.75937690654921175, 4.5115995532622799], [1.7951101831142302, 0.14991735807676321]], [[6.726283127055761,
3.7898088176461195], [2.5855389886496418, 9.1252870909347585]], [[7.8536456217510047, 8.0502398430343973],
[1.1588018653382948, 2.4584607333143635]], [[2.2473179473545315, 6.152134932857793], [6.9364725552703046,
2.1904050858298394]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-2.9697925334)+(1.-msk_arg0)*(-4.26135335725)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[3.9689996783063126, 2.6024749301521517, -2.8657897182202263, 3.4523361907793202],
[1.0646468808240472, 2.2809214673673006, 1.9110441510817342, 3.6637536830808415], [-4.8161620946685977,
1.1260192950202335, -1.5444099528131283, 4.5856953227320361]], [[3.4807853259935388, 1.0632821522370133,
-1.7813251042294, 0.96803702807832348], [-2.2395880868316476, 4.8919502166960243, 3.0915081953974273,
-0.85921425228962178], [-0.24500754865585961, -3.000069805276242, -2.3285433357124861, -3.7526812827715004]]],
[[[-2.6148866735769314, -2.9426881222754986, -2.1105189060422127, -1.718323686970705], [0.38236683235255065,
4.8146833101999391, -0.69724678041282662, -3.674837501299455], [-1.1217878757973345, 1.9457797122429064,
4.3330454272287042, 1.2870165165330079]], [[0.90390350707926448, 4.0932246664578322, 4.0170833493811937,
2.3057200276883218], [-4.1149618340720506, 4.3206785552080422, 4.5478406361616468, 3.4270491303459689],
[-3.2122582790653578, -0.051138136931458078, 2.847106348954056, -2.0922906343243097]]], [[[-3.8470709835005801,
0.79389346854249432, 1.9702586564654192, -1.230993932131331], [0.52027641197917784, 4.1606002966489264,
-4.1240899145057277, 3.0855602864655047], [1.2434749670286918, 1.9421106344042691, -4.7997149299258455,
-3.1016051858236517]], [[-4.0158867307020536, -1.2810983979769732, 4.1806447574751786, 2.4159993753375488],
[3.8210591526688589, 2.9170696329659753, 0.212629682453775, -3.6791629346607402], [-0.52709663403725493,
-2.0893727810689953, -1.7473644406170976, -4.1869442335699976]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[0.99920714490574225, -0.36731760324841867, -5.8355822516207967, 0.48254365737874982],
[-1.9051456525765231, -0.68887106603326975, -1.0587483823188362, 0.69396114968027112], [-7.7859546280691685,
-1.8437732383803369, -4.5142024862136987, 1.6159027893314657]], [[0.51099279259296848, -1.9065103811635571,
-4.7511176376299709, -2.0017555053222469], [-5.2093806202322179, 1.9221576832954539, 0.12171566199685691,
-3.8290067856901921], [-3.21480008205643, -5.9698623386768119, -5.2983358691130569, -6.7224738161720712]]],
[[[-5.5846792069775013, -5.9124806556760685, -5.0803114394427826, -4.6881162203712758], [-2.5874257010480197,
1.8448907767993687, -3.667039313813397, -6.6446300347000253], [-4.0915804091979044, -1.024012821157664,
1.3632528938281339, -1.6827760168675625]], [[-2.0658890263213059, 1.1234321330572619, 1.0472908159806233,
-0.66407250571224852], [-7.0847543674726214, 1.3508860218074719, 1.5780481027610764, 0.45725659694539855],
[-6.1820508124659277, -3.0209306703320284, -0.12268618444651436, -5.0620831677248805]]], [[[-6.8168635169011509,
-2.175899064858076, -0.99953387693515117, -4.2007864655319018], [-2.4495161214213925, 1.190807763248356,
-7.0938824479062976, 0.11576775306493436], [-1.7263175663718786, -1.0276818989963012, -7.7695074633264163,
-6.0713977192242226]], [[-6.9856792641026235, -4.250890931377544, 1.2108522240746082, -0.55379315806302154],
[0.8512666192682885, -0.052722900434595044, -2.7571628509467954, -6.6489554680613105], [-3.4968891674378253,
-5.0591653144695652, -4.7171569740176675, -7.1567367669705675]]]])+(1.-msk_ref)*numpy.array([[[[-0.29235367894345909,
-1.65887842709762, -7.1271430754699985, -0.80901716647045152], [-3.1967064764257245, -1.9804318898824711,
-2.3503092061680375, -0.59759967416893023], [-9.0775154519183694, -3.1353340622295383, -5.8057633100629005,
0.32434196548226435]], [[-0.78056803125623286, -3.1980712050127584, -6.0426784614791718, -3.2933163291714482],
[-6.5009414440814197, 0.63059685944625254, -1.1698451618523444, -5.1205676095393935], [-4.5063609059056313,
-7.2614231625260137, -6.5898966929622578, -8.0140346400212721]]], [[[-6.8762400308267031, -7.2040414795252703,
-6.3718722632919844, -5.9796770442204767], [-3.8789865248972211, 0.5533299529501674, -4.9586001376625983,
-7.9361908585492262], [-5.3831412330471062, -2.3155736450068654, 0.071692069978932516, -2.9743368407167639]],
[[-3.3574498501705072, -0.16812869079193948, -0.244270007868578, -1.9556333295614499], [-8.3763151913218223,
0.059325197958270515, 0.28648727891187509, -0.83430422690380279], [-7.4736116363151295, -4.3124914941812298,
-1.4142470082957157, -6.3536439915740814]]], [[[-8.1084243407503518, -3.4674598887072774, -2.2910947007843525,
-5.4923472893811027], [-3.7410769452705939, -0.10075306060084532, -8.3854432717554985, -1.175793070784267],
[-3.01787839022108, -2.3192427228455026, -9.0610682871756172, -7.3629585430734235]], [[-8.2772400879518244,
-5.5424517552267449, -0.080708599774593104, -1.8453539819122229], [-0.44029420458091284, -1.3442837242837964,
-4.0487236747959967, -7.9405162919105123], [-4.7884499912870266, -6.350726138318767, -6.0087177978668693,
-8.4482975908197702]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank1_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([2.1945719955206853,
-3.4851810549539852])+(1.-msk_arg0)*numpy.array([-3.159460740559509, 1.0507096466806898])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(2.92811762582)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([5.1226896213358133,
-0.5570634291388572])+(1.-msk_ref)*numpy.array([-0.23134311474438096, 3.9788272724958178])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank1_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([1.9387192390641195,
-2.294788495198282])+(1.-msk_arg0)*numpy.array([-3.9950296964046816, -4.9584579002903517])
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([0.68148355985483988, 0.33396702170122339])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([2.6202027989189594, -1.9608214734970586])+(1.-msk_ref)*numpy.array([-3.3135461365498418,
-4.6244908785891283])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank2_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[1.9335525790389809, 4.8876884032830024, -3.6794048434152948, -2.9337672885330814,
0.5880232587543972], [1.2731441866942719, 4.8021715240969982, 2.9871285060348427, 4.3674026791776921,
2.3324101078324144], [3.257367767879968, 3.614481137699638, -4.0465097244122443, -3.3712543524462166,
0.83424572698980626], [-4.7734011845397317, -1.1918316514932537, -2.641576771310632, -3.7441723823507447,
2.5792398168240602]])+(1.-msk_arg0)*numpy.array([[0.51038147587387783, -3.548018657118809, 3.7494118465432393,
3.6729170048063136, -2.9522974158811746], [3.2109365766033289, -1.7347320393345091, -0.9996429948297223,
-0.75500884718678307, 1.5928790967815267], [-4.1174844249701259, 4.2030131668606234, -4.8484509001230229,
2.7032344298767921, 4.3009935101668333], [-1.4527019870327429, 3.9347061378002781, 1.21415230923688,
-3.666838308237784, -3.8400590973123858]])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(3.22997214356)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[5.1635247225953336, 8.117660546839355, -0.44943269985894219, 0.29620485502327121,
3.8179954023107499], [4.5031163302506245, 8.0321436676533509, 6.2171006495911953, 7.5973748227340447,
5.5623822513887671], [6.4873399114363206, 6.8444532812559906, -0.81653758085589168, -0.14128220888986398,
4.0642178705461589], [-1.5434290409833791, 2.038140492063099, 0.58839537224572069, -0.51420023879439203,
5.8092119603804129]])+(1.-msk_ref)*numpy.array([[3.7403536194302305, -0.31804651356245639, 6.979383990099592,
6.9028891483626662, 0.27767472767517809], [6.4409087201596815, 1.4952401042218435, 2.2303291487266304,
2.4749632963695696, 4.8228512403378794], [-0.88751228141377325, 7.4329853104169761, -1.6184787565666703,
5.9332065734331447, 7.5309656537231859], [1.7772701565236098, 7.1646782813566308, 4.4441244527932326,
-0.43686616468143136, -0.61008695375603317]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank2_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[-0.074742989914646785, -1.8482493880577588, 1.0926262448311599, 4.5158483202643716,
-3.0805669333005561], [0.0085606966159099684, -2.9696862086974996, 3.3024460854167597, 1.5088165460119427,
-3.6452065491857266], [0.18694035412066512, -4.6738922180085147, 3.9551045875071438, 4.0084174115638724,
-0.63332177275981749], [2.5093858800842108, -0.36171911019222946, 0.19138395375626427, -3.1795621861527734,
-2.6267949144535008]])+(1.-msk_arg0)*numpy.array([[-3.5942187686631524, -3.7060821431133406, 0.9533196788857623,
-4.8840044000628744, 0.3938790125214453], [4.0652979493208985, 4.5325841421496644, -0.4281905049316661,
-1.742508580451184, 2.7120740894023898], [0.56888661640784566, -2.4569299021956068, 3.568568120069024,
-2.0793352745659766, -1.7689628659930126], [-4.8632954420706014, -2.8828667280653364, 3.4090243893802246,
3.0651732601260697, 4.6463764755640256]])
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[-1.4953863183942318, -3.5127993001524969, 2.9138150805794103, -1.6144165168200519,
-0.65062618022498242], [-4.9181569250500168, -2.6971927119277908, 4.2365880197149934, -4.2036145824282496,
2.2260090531531453], [4.0868409931398002, -3.3893548967194032, 2.9012650531553019, -2.2355683566643378,
2.9627609193479501], [4.9921359000605019, 0.6569024014440803, 3.3639734573108839, 0.89356331435440595,
-4.0709626638242327]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[-1.5701293083088785, -5.3610486882102553, 4.0064413254105702, 2.9014318034443196,
-3.7311931135255385], [-4.9095962284341068, -5.6668789206252903, 7.5390341051317531, -2.6947980364163069,
-1.4191974960325813], [4.2737813472604653, -8.0632471147279183, 6.8563696406624457, 1.7728490548995346,
2.3294391465881326], [7.5015217801447127, 0.29518329125185083, 3.5553574110671482, -2.2859988717983675,
-6.6977575782777334]])+(1.-msk_ref)*numpy.array([[-5.0896050870573841, -7.2188814432658379, 3.8671347594651726,
-6.4984209168829263, -0.25674716770353712], [-0.85285897572911828, 1.8353914302218737, 3.8083975147833273,
-5.9461231628794335, 4.9380831425555352], [4.6557276095476459, -5.8462847989150095, 6.4698331732243259,
-4.3149036312303144, 1.1937980533549375], [0.12884045798990051, -2.2259643266212561, 6.7729978466911085,
3.9587365744804757, 0.57541381173979289]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank3_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[-2.1957568090391955, 0.56747277575122101], [-1.4226171578539604,
-3.1174336379255854]], [[1.9150168705353749, 0.46771483389240665], [-0.73261624542450932, 1.4533109165427449]],
[[-4.3700026677098416, -4.4121889510507675], [-4.2432470132589684, -4.6365817911825937]], [[4.3712760608754326,
0.48815678812850649], [-4.2919585871561221, 2.8753619236403747]], [[4.7410827225779482, -3.2941488290580354],
[3.5834613437014919, 0.53477849558006074]], [[-2.2697241902980902, 1.4839036193452078], [4.3514574228344109,
2.0334834769049763]]])+(1.-msk_arg0)*numpy.array([[[1.9065956016010119, 3.8011536401496766], [4.2481111431072272,
0.7657337986451509]], [[1.7488690210709832, 4.5064595133713876], [-1.261534521038973, -1.5095749568667172]],
[[1.2010203264269057, 0.055494332510111377], [4.3269730839285749, -0.54412407243328076]], [[-2.6257140205956175,
-3.4462245120816002], [1.3451771798822101, 2.462398203439907]], [[-2.5713124204289493, 1.9356323962441504],
[1.8879658089499234, 3.1212800001648091]], [[1.942043508304808, 0.80539011514164471], [-0.3765200612428643,
0.73339801844715691]]])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(2.24723235412)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[0.05147554507665264, 2.8147051298670691], [0.82461519626188773,
-0.87020128380973727]], [[4.162249224651223, 2.7149471880082547], [1.5146161086913388, 3.700543270658593]],
[[-2.1227703135939935, -2.1649565969349194], [-1.9960146591431203, -2.3893494370667456]], [[6.6185084149912807,
2.7353891422443546], [-2.044726233040274, 5.1225942777562228]], [[6.9883150766937963, -1.0469164749421873],
[5.83069369781734, 2.7820108496959088]], [[-0.022491836182242153, 3.7311359734610559], [6.598689776950259,
4.2807158310208244]]])+(1.-msk_ref)*numpy.array([[[4.15382795571686, 6.0483859942655247], [6.4953434972230752,
3.012966152760999]], [[3.9961013751868313, 6.7536918674872357], [0.98569783307687509, 0.73765739724913093]],
[[3.4482526805427538, 2.3027266866259595], [6.574205438044423, 1.7031082816825673]], [[-0.37848166647976944,
-1.1989921579657521], [3.5924095339980582, 4.7096305575557551]], [[-0.32408006631310116, 4.1828647503599985],
[4.1351981630657715, 5.3685123542806572]], [[4.1892758624206561, 3.0526224692574928], [1.8707122928729838,
2.980630372563005]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank3_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[-3.6330041831896742, 1.9011276595647058], [4.0527837903730326, 3.7453216540822218]],
[[1.1423057067323032, -4.6191355501663702], [-0.19479401086936399, 3.6518312558771875]], [[-0.78164127432320996,
-0.0025588788834731702], [-2.5155059876978534, -2.7853664238124578]], [[-2.4557560474662496, -1.7001261418483038],
[2.2437567320884249, -4.5528490181464578]], [[3.3965240991344601, 2.7531638892344281], [-1.0182649859279858,
0.37879180372082377]], [[-2.2634040587587356, -3.6908761533687482], [-2.6652399154901509,
-2.0159814304593739]]])+(1.-msk_arg0)*numpy.array([[[4.9981907924797788, 4.277720751221235], [-4.4785446333946686,
-3.8140270519701982]], [[1.4517149340948965, 1.9122847710945834], [-1.0984824997077558, 4.9260526287710995]],
[[3.0231870187238314, -4.426803554802202], [-0.1009215503507912, -2.4226611633877337]], [[3.1439947236211125,
-2.7156096061802728], [-0.27949941006709977, 0.15562912547547469]], [[-1.6704879956646712, -0.87822202800174587],
[-4.0968204088950708, -4.8812474874399072]], [[-3.0876637956180186, 0.42808604578959475], [-0.76617423765119153,
1.4811418969805343]]])
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[-3.655791939954395, 1.9082625611635287], [2.0305234873740705, -3.9575879711347337]],
[[0.58883813376680294, -0.44253502109642717], [-0.50659655202841058, 4.7262250303753071]], [[2.3551049262619417,
-2.7472704728416062], [-4.2131185370897501, 1.1560716927603512]], [[-1.8521430501234626, -2.8126771236453196],
[-1.6116964851382032, 4.3144406033510982]], [[-4.4005771771028979, -3.8795508309654512], [0.95903540985898683,
-0.84559016177598512]], [[-2.6007509769442674, -0.13151235868250399], [-1.5038936232862978, -3.9733280592961249]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-7.2887961231440688, 3.8093902207282344], [6.0833072777471031, -0.21226631705251187]],
[[1.7311438404991062, -5.0616705712627974], [-0.70139056289777457, 8.3780562862524945]], [[1.5734636519387317,
-2.7498293517250794], [-6.7286245247876035, -1.6292947310521066]], [[-4.3078990975897122, -4.5128032654936234],
[0.63206024695022167, -0.23840841479535957]], [[-1.0040530779684378, -1.1263869417310231], [-0.059229576068998924,
-0.46679835805516134]], [[-4.8641550357030034, -3.8223885120512522], [-4.1691335387764488,
-5.9893094897554988]]])+(1.-msk_ref)*numpy.array([[[1.3423988525253838, 6.1859833123847636], [-2.4480211460205981,
-7.7716150231049319]], [[2.0405530678616994, 1.4697497499981562], [-1.6050790517361664, 9.6522776591464066]],
[[5.3782919449857731, -7.1740740276438082], [-4.3140400874405413, -1.2665894706273826]], [[1.29185167349765,
-5.5282867298255924], [-1.891195895205303, 4.4700697288265729]], [[-6.0710651727675691, -4.757772858967197],
[-3.137784999036084, -5.7268376492158923]], [[-5.688414772562286, 0.29657368710709076], [-2.2700678609374894,
-2.4921861623155905]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
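# The rank4 variants below complete the addition coverage in this section,
# pairing a (3, 2, 3, 4) expanded Data object first with a scalar Symbol and
# then with a Symbol of the same shape.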
def test_add_overloaded_expandedData_rank4_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[4.965007128412612, 3.4584141019026564, -1.0391619896304451, 4.5542963326499351],
[-0.0016792172679549466, -2.9053441565334981, 0.025786108583792711, -0.89554847161554374], [4.4904084527351209,
-0.89553646258473307, 3.8929449623498495, -2.8715607346304415]], [[-3.727374719009604, 2.2555823384608908,
0.53380019017552272, -0.29480940480144113], [-3.6344667828862445, -4.8499559892732567, 3.5342171405331317,
1.9875915936023327], [3.0643486049591804, -2.9482947381564806, 1.257296440825332, -4.4599817600046716]]],
[[[-3.7989993001254971, 4.2006768317373879, -1.9340842456373886, 0.25295780568139836], [0.15305381262779072,
2.184447614622945, -2.0595806484522039, 1.6196719151709491], [-1.550459702477788, 2.2328097059995393,
-3.2648987061947632, -1.7698524550474004]], [[-3.1067614393264673, 3.6490340896776274, 4.2948603770463407,
-3.4382940099694084], [-1.765073080880275, 2.5928931740693892, 2.2530590640640069, 2.7653349815108443],
[-0.88766895991026384, 3.8444038125137965, 3.8283329993863564, 1.6961545196727537]]], [[[-1.6941819291782823,
-4.3507603532160344, 0.58625398426930175, -4.9534370199923137], [4.3258398610183271, 4.7398172498630355,
-0.27425006429631082, -0.80958052389792012], [0.27800145594245151, -0.70646630926925713, -1.3619199397032533,
-0.22712536683851958]], [[-3.7307177958823781, -0.17135910311966995, -1.2454260400370809, 1.8499155339141273],
[0.7652733563966283, -4.2318891899847593, 4.1390775019993704, 2.1086112655335079], [-4.4480501135282662,
4.3290513315610166, -4.1098101623830443, -2.8839598970399614]]]])+(1.-msk_arg0)*numpy.array([[[[3.9323713317642746,
4.4527426387356446, 1.8489227456459432, 2.295838413561385], [-1.5932231826477694, -0.043483214358698064,
2.6866561252017789, -1.3064680912144833], [-4.563955043071191, -4.5294274892608124, 1.1139333008427865,
-3.356095173880258]], [[-0.39784058429088365, 1.3572530126249651, 0.73921609667405086, -2.8036097598039502],
[-1.6466307808609693, -3.6730522383966999, -4.2815488732075613, -3.0943250956889665], [0.84471742986867238,
3.3304241697775492, -2.7207357502431542, -1.8257126717947059]]], [[[0.21030801293033274, 4.6379651350087698,
4.213456762528347, 4.0550184068364885], [-2.5755175539757227, 2.6713165204428986, 3.2808072440183729,
2.8475364996882107], [4.8503832880401561, -0.89396576884489498, 4.8726952699950328, 1.8570156992262419]],
[[-4.6778874236692944, 2.1109769293880465, 0.79097589510131172, -2.1112073984121893], [2.558958067688426,
2.8307096810380727, 0.012443144332241474, -3.7601222060065065], [-1.3755439053562823, 2.9800220614031678,
1.6579582033193425, 4.4427116407434362]]], [[[-0.86660146317817688, 1.3032310329697525, 3.0027070238303377,
-2.9114837729491319], [-3.4567748888099636, 3.3638086688271702, 4.1486162466002519, 2.0749122046757407],
[0.84439318528796647, -3.6592289308593697, 0.77430002321168345, 1.7927967246699836]], [[-1.1981415218608116,
2.3445312580391588, -1.5436298697897444, 1.6111465180751141], [1.6230738725320037, -1.3035089800291666,
-4.6787506207538687, 2.9155460797717678], [3.3315156088599238, -3.5200805068877128, -1.1181004173108544,
-2.2485916181204857]]]])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(3.43950171094)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[8.4045088393544027, 6.8979158128444471, 2.4003397213113455, 7.9937980435917257],
[3.4378224936738357, 0.5341575544082926, 3.4652878195255834, 2.543953239326247], [7.9299101636769116,
2.5439652483570576, 7.3324466732916402, 0.56794097631134921]], [[-0.28787300806781335, 5.6950840494026815,
3.9733019011173134, 3.1446923061403496], [-0.19496507194445378, -1.410454278331466, 6.9737188514749224,
5.4270933045441234], [6.5038503159009711, 0.49120697278531011, 4.6967981517671227, -1.0204800490628809]]],
[[[-0.35949758918370645, 7.6401785426791786, 1.5054174653044021, 3.6924595166231891], [3.5925555235695814,
5.6239493255647357, 1.3799210624895868, 5.0591736261127398], [1.8890420084640027, 5.67231141694133, 0.1746030047470275,
1.6696492558943903]], [[0.33274027161532338, 7.0885358006194181, 7.7343620879881314, 0.0012077009723823195],
[1.6744286300615157, 6.0323948850111799, 5.6925607750057976, 6.204836692452635], [2.5518327510315268,
7.2839055234555872, 7.2678347103281471, 5.1356562306145443]]], [[[1.7453197817635084, -0.91125864227424369,
4.0257556952110924, -1.513935309050523], [7.7653415719601178, 8.1793189608048262, 3.1652516466454799,
2.6299211870438706], [3.7175031668842422, 2.7330354016725336, 2.0775817712385374, 3.2123763441032711]],
[[-0.29121608494058737, 3.2681426078221207, 2.1940756709047098, 5.289417244855918], [4.204775067338419,
-0.79238747904296858, 7.5785792129411611, 5.5481129764752986], [-1.0085484025864755, 7.7685530425028073,
-0.67030845144125362, 0.55554181390182933]]]])+(1.-msk_ref)*numpy.array([[[[7.3718730427060652, 7.8922443496774353,
5.2884244565877339, 5.7353401245031757], [1.8462785282940213, 3.3960184965830926, 6.1261578361435696,
2.1330336197273074], [-1.1244533321294004, -1.0899257783190217, 4.5534350117845772, 0.083406537061532671]],
[[3.041661126650907, 4.7967547235667558, 4.1787178076158416, 0.63589195113784047], [1.7928709300808214,
-0.23355052745490923, -0.84204716226577059, 0.34517661525282417], [4.2842191408104631, 6.7699258807193399,
0.71876596069863652, 1.6137890391470848]]], [[[3.6498097238721234, 8.0774668459505605, 7.6529584734701377,
7.4945201177782792], [0.86398415696606801, 6.1108182313846893, 6.7203089549601636, 6.2870382106300013],
[8.2898849989819468, 2.5455359420968957, 8.3121969809368235, 5.2965174101680326]], [[-1.2383857127275038,
5.5504786403298372, 4.2304776060431024, 1.3282943125296014], [5.9984597786302167, 6.2702113919798634,
3.4519448552740322, -0.32062049506471579], [2.0639578055855083, 6.4195237723449585, 5.0974599142611332,
7.8822133516852269]]], [[[2.5729002477636138, 4.7427327439115432, 6.4422087347721284, 0.52801793799265884],
[-0.017273177868172951, 6.8033103797689609, 7.5881179575420425, 5.5144139156175314], [4.2838948962297572,
-0.21972721991757904, 4.2138017341534741, 5.2322984356117743]], [[2.2413601890809791, 5.7840329689809495,
1.8958718411520463, 5.0506482290169048], [5.0625755834737944, 2.1359927309126241, -1.239248909812078,
6.3550477907135585], [6.7710173198017145, -0.080578795945922099, 2.3214012936309363, 1.190910092821305]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank4_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[3.2510674404409041, 2.1171696862303406, 2.9610258759664267, -3.8373977579450456],
[0.75383244276133166, 2.4077943881602728, 3.873284406870285, 3.7937584009819574], [-4.6069898901399364,
-2.5452970249895754, 3.650830786457707, -0.56630176651201847]], [[3.6738989513815135, -1.1553536380556686,
4.303352195803182, 2.0201689947921695], [2.5110280594242029, 1.1178178456135743, 3.5722095880572251,
-3.0495901167648221], [-1.8161969765914288, -3.850369287459924, 1.8305771607495833, 3.8129356009276751]]],
[[[4.8159492177547296, -2.7259760165966638, -0.056119891503465524, 3.2320437499651025], [4.1412540490540568,
2.3145635424798332, 4.2298625240821792, -4.9326174629443722], [1.2505234798682396, 4.1728981653768358,
-1.4526511101284445, -0.73865645812869563]], [[-2.5027203270038956, -0.75821705726011146, -2.0074201432570495,
-0.20166798891695503], [1.7962444938241209, 4.9186635916785164, -3.3612255674731486, -3.1402103698143327],
[4.8100127068213077, -3.7003932729639377, -2.3809463861562454, 2.6337296431542621]]], [[[0.8461884816413443,
2.2850095300693116, 3.1039351776827235, 2.7358221987272575], [-1.331100327658973, -2.4718869003284438,
3.8392116060077814, 3.7886003252177218], [-2.740692362699221, -1.1104811343803189, 1.065443269317063,
-1.604926521206449]], [[3.1359320207935291, 2.4159415877072101, -2.9781841648177654, 0.4457695581762291],
[1.4022534028069558, 3.2181877465159641, 4.1561033889739196, -4.5314636502141923], [2.4896032954770373,
-1.6749755107952033, -4.2977752660345292, 4.3862296692093636]]]])+(1.-msk_arg0)*numpy.array([[[[3.8098232095134126,
-2.0180524002497693, 4.420784171182504, -2.4324750966542674], [2.4681882567616125, 3.0279649104786941,
2.2383665512055266, -0.091420157761364251], [4.7846856391630048, 0.45001495814867454, 2.8428137570111911,
3.6542996408716562]], [[-3.3832925941075711, -4.6684050424331947, 2.7145812310865534, 0.57489640415196952],
[3.2363298539062395, -0.28076205609599914, -2.1610563710523598, -3.9600308036480381], [4.1445091213012599,
0.23464603550937735, -4.9214532841127738, 3.7601288072640866]]], [[[4.5878923885513938, -2.7602444517968006,
-2.4823493575559641, -1.1998619544811917], [-1.0165322624110429, 4.8743114304602564, 3.0069704689379755,
2.0086372739622043], [-1.7482883016273565, 4.5233781656491008, 1.0481669308330579, 3.3780108680134457]],
[[-4.5351514069636076, -4.760484108729206, -1.7334568308716203, -4.3080131499917833], [4.0321976091043883,
-2.6576000312675063, 1.3372423488299923, -3.8949616711167625], [3.5793384711817051, 2.60693067621275,
1.8056256765125287, -3.9915454170699869]]], [[[0.39851532295995273, 2.2465287291059273, 0.64170560779626662,
-4.7331314705888738], [3.5329039709028898, -2.5311269573107662, 2.8367974744858193, -4.3457969220676684],
[-1.526677955424999, -2.5983211468943357, -1.3293797580217093, -3.1887378668078279]], [[3.1416335105809505,
0.35146012646543134, 2.428390004415637, 2.7813900205500861], [3.5228217461650111, -0.012304332300811183,
-3.1395042313107369, 4.8647351561551702], [2.2570133784920099, -1.7535240218446777, 0.38792070998653028,
-0.21839923153693785]]]])
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[-0.55399336747432937, -3.6468486902030306, 2.4533567494215669, 4.8267547347789659],
[1.1480960590338416, 3.5599245920968787, -2.8247534868419724, -2.2031349101131505], [1.7520095897646017,
4.4293583295521266, -3.2046920932014888, -3.8760923163847472]], [[3.9288042477427645, 1.103593535294765,
0.62546922225950485, 2.5431633219905123], [2.5483588394973191, -0.82358610517599207, -0.47010674146441023,
2.7635563586840011], [3.5616440522317419, 2.2995934729430481, -3.501591556463012, 1.3778428754586027]]],
[[[-4.3918539920661051, 0.24976043236636869, -2.4847081470778463, 4.8636790550226792], [-4.2172400078729559,
-2.0316184192507647, -0.53464794178739794, -0.035422588600630966], [1.7049703562375615, 4.2019750499164399,
-3.7430217705554858, -3.4952387702082346]], [[-0.39925876875124189, 1.4505137462439404, -4.1941814051173072,
-1.844757872605356], [-3.4448187389632414, -3.5340944666273377, -3.178247383159305, -1.7824872241435519],
[-3.6843631882800798, -4.1186208792142187, 2.0636953370355959, -0.18717114434561122]]], [[[-2.4316812831173742,
0.39582208925882689, 1.4893695917228467, -3.1232026180567773], [2.1122901499636226, 4.9884613457151978,
-4.7793541216702149, -3.9541373136233391], [-4.8256481088328194, -0.10764491664526066, 2.9970513787255895,
-1.0443943611478437]], [[3.6491162738908258, 3.4225261399204765, -2.9600723325757849, 3.3422667802452324],
[-3.763493116056098, 4.6894908619506595, 2.532040050484988, 0.99028387045053101], [2.5962274887920085,
-0.2721955960411897, -4.7946284910477441, -0.96141278632713245]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[2.6970740729665748, -1.52967900397269, 5.4143826253879936, 0.98935697683392032],
[1.9019285017951733, 5.9677189802571515, 1.0485309200283126, 1.5906234908688068], [-2.8549803003753347,
1.8840613045625512, 0.44613869325621813, -4.4423940828967652]], [[7.6027031991242779, -0.051760102760903592,
4.9288214180626868, 4.5633323167826818], [5.059386898921522, 0.29423174043758227, 3.1021028465928149,
-0.28603375808082099], [1.7454470756403131, -1.550775814516876, -1.6710143957134287, 5.1907784763862779]]],
[[[0.42409522568862457, -2.4762155842302951, -2.5408280385813118, 8.0957228049877816], [-0.075985958818899135,
0.28294512322906851, 3.6952145822947813, -4.9680400515450032], [2.9554938361058012, 8.3748732152932757,
-5.1956728806839303, -4.2338952283369302]], [[-2.9019790957551375, 0.6922966889838289, -6.2016015483743567,
-2.046425861522311], [-1.6485742451391205, 1.3845691250511787, -6.5394729506324536, -4.922697593957885],
[1.1256495185412279, -7.8190141521781564, -0.3172510491206495, 2.4465584988086508]]], [[[-1.5854928014760299,
2.6808316193281385, 4.5933047694055702, -0.38738041932951983], [0.78118982230464962, 2.516574445386754,
-0.94014251566243345, -0.16553698840561726], [-7.5663404715320404, -1.2181260510255796, 4.0624946480426525,
-2.6493208823542926]], [[6.7850482946843549, 5.8384677276276866, -5.9382564973935503, 3.7880363384214615],
[-2.3612397132491423, 7.9076786084666235, 6.6881434394589077, -3.5411797797636613], [5.0858307842690458,
-1.9471711068363931, -9.0924037570822733, 3.4248168828822312]]]])+(1.-msk_ref)*numpy.array([[[[3.2558298420390832,
-5.6649010904527994, 6.8741409206040709, 2.3942796381246985], [3.6162843157954541, 6.5878895025755728,
-0.58638693563644573, -2.2945550678745148], [6.5366952289276066, 4.8793732877008011, -0.36187833619029774,
-0.22179267551309101]], [[0.54551165363519338, -3.5648115071384296, 3.3400504533460582, 3.1180597261424818],
[5.7846886934035586, -1.1043481612719912, -2.63116311251677, -1.196474444964037], [7.7061531735330018,
2.5342395084524254, -8.4230448405757858, 5.1379716827226893]]], [[[0.19603839648528876, -2.5104840194304319,
-4.9670575046338108, 3.6638171005414875], [-5.2337722702839988, 2.8426930112094917, 2.4723225271505775,
1.9732146853615733], [-0.043317945389794943, 8.7253532155655407, -2.6948548397224279, -0.11722790219478885]],
[[-4.9344101757148495, -3.3099703624852657, -5.9276382359889279, -6.1527710225971397], [0.58737887014114687,
-6.1916944978948436, -1.8410050343293127, -5.6774488952603139], [-0.10502471709837469, -1.5116902030014687,
3.8693210135481246, -4.1787165614155981]]], [[[-2.0331659601574215, 2.6423508183647542, 2.1310751995191133,
-7.8563340886456512], [5.6451941208665124, 2.4573343884044316, -1.9425566471843956, -8.2999342356910084],
[-6.3523260642578183, -2.7059660635395963, 1.6676716207038802, -4.2331322279556716]], [[6.7907497844717764,
3.7739862663859078, -0.53168232816014793, 6.1236568007953185], [-0.24067136989108695, 4.6771865296498483,
-0.60746418082574882, 5.8550190266057012], [4.8532408672840184, -2.0257196178858674, -4.4067077810612139,
-1.1798120178640703]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
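# From here on the tests exercise the overloaded subtraction operator. The
# constData cases keep arg0 spatially constant, so the expected value is
# simply arg0 - s1, with rank0 operands broadcast against higher-rank
# Symbols and higher-rank Data broadcast against rank0 Symbols, as the
# getShape() assertions confirm.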
def test_sub_overloaded_constData_rank0_Symbol_rank0(self):
arg0=Data(1.30830371112,self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(0.0412291309402)
sub=res.substitute({arg1:s1})
ref=Data(1.26707458018,self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(-4.2604726935,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-3.8546037299533653, -1.305392606117024])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-0.4058689635493371, -2.9550800873856784]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(0.902009664206,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-3.117681444740418, -3.2512793024980069, -3.7762244881344218, -0.50644943812549315,
3.066726444630655], [-2.6348956508380805, -0.90372740616696667, 0.5252271533586752, 2.0132741900533446,
2.0837322808099037], [0.088376617597372586, 0.67864487020517306, 3.7057383001711681, 1.0445042366908988,
-2.1093161712985955], [4.328915747720707, -0.73501622742024342, -0.088412628376807412, -3.0414953794209754,
1.610361274316344]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[4.0196911089468177, 4.1532889667044071, 4.6782341523408215, 1.4084591023318929,
-2.1647167804242553], [3.5369053150444802, 1.8057370703733664, 0.37678251084772452, -1.1112645258469449,
-1.181722616603504], [0.81363304660902713, 0.22336479400122666, -2.8037286359647684, -0.14249457248449904,
3.0113258355049952], [-3.4269060835143073, 1.6370258916266431, 0.99042229258320713, 3.9435050436273751,
-0.7083516101099443]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(4.30012329043,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[-2.4328051948060772, 1.3096803933228829], [-1.9201038070201615, 2.2529209930562519]],
[[4.4911763191005498, -0.0070408039855616167], [-4.5070979412665588, 0.23394826644475319]], [[-2.0679275681214171,
4.7260141882743518], [-1.9530690972223672, 4.2165911161948344]], [[4.2340594486013217, 0.31531838157863668],
[1.2102543060708451, 4.5768051588147358]], [[4.9016533619135778, 1.0237157761801843], [-1.6198381225390657,
1.509534129406096]], [[-2.8351524725878399, -0.8712771035569391], [-1.2500793307427105, 0.52784760832550681]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[6.732928485237343, 2.990442897108383], [6.2202270974514278, 2.0472022973750139]],
[[-0.19105302866928398, 4.3071640944168275], [8.8072212316978238, 4.0661750239865126]], [[6.3680508585526834,
-0.42589089784308598], [6.2531923876536331, 0.083532174236431445]], [[0.066063841829944181, 3.9848049088526292],
[3.0898689843604208, -0.27668186838346998]], [[-0.60153007148231197, 3.2764075142510816], [5.9199614129703315,
2.7905891610251699]], [[7.1352757630191057, 5.1714003939882049], [5.5502026211739768,
3.772275682105759]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(-3.5839426267,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-2.9729696451374421, 2.7845056200381855, 0.070436437102223692, 0.66836223796868044],
[0.40381761203578836, -1.7869220467261826, -4.3681167712065552, 1.0762008553734699], [-3.4293067325266744,
-3.8959384230092855, -4.2869773308861872, -3.5982581222849266]], [[3.8085384127848325, -4.9902013750126919,
1.7025140755302903, -1.8585391591273237], [-1.8948326373524536, 2.0874520505745666, -1.8647114753321095,
3.9665649921657007], [-2.6617432109425376, -0.043781338271665859, -4.3924469058705498, -4.6038566089651081]]],
[[[4.1612414942039617, -0.24691459950937489, 1.8801077349311939, -4.0607604598486082], [-0.48975931816079132,
4.776651055544292, 2.5892649853139229, 2.6300466396994988], [-0.6331493645323949, -4.8747858313906498,
2.5714462579440713, -0.12625615907892662]], [[1.8766405716198298, 0.97931619405259518, -1.2333119307639082,
3.632140408148242], [0.96979041799351151, -4.0819837173164526, 3.4625138677193164, -1.7431511130821575],
[-2.7530992377422381, -3.1495479306859906, 1.3466227111831488, -2.3016323722421128]]], [[[-2.8378224290103491,
-0.7230057223129247, 0.95865498114414649, 0.14297561114879365], [2.3319242484901492, 4.9972541799736234,
-1.7121650896762564, 1.6097551517446558], [2.7133813837524077, -3.1913323682416994, -0.39896207531318861,
-3.2753783571190107]], [[1.3158800827274399, -0.034075573686918936, 3.2707189112070392, -2.9118211235462041],
[4.362994678434946, -3.2771781302292515, 3.4919565479064456, 1.6061522420425254], [-1.8973785117347788,
-4.4461539342202174, -3.8132486661529263, -0.74231592463494511]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-0.61097298156602342, -6.368448246741651, -3.6543790638056892, -4.252304864672146],
[-3.9877602387392539, -1.7970205799772829, 0.78417414450308964, -4.6601434820769354], [-0.15463589417679113,
0.31199579630581997, 0.70303470418272163, 0.014315495581461057]], [[-7.392481039488298, 1.4062587483092264,
-5.2864567022337559, -1.7254034675761418], [-1.689109989351012, -5.6713946772780321, -1.7192311513713561,
-7.5505076188691662], [-0.9221994157609279, -3.5401612884317997, 0.80850427916708423, 1.0199139822616425]]],
[[[-7.7451841209074272, -3.3370280271940906, -5.4640503616346594, 0.4768178331451427], [-3.0941833085426742,
-8.3605936822477567, -6.1732076120173884, -6.2139892664029643], [-2.9507932621710706, 1.2908432046871843,
-6.1553888846475369, -3.4576864676245389]], [[-5.4605831983232953, -4.5632588207560607, -2.3506306959395573,
-7.2160830348517075], [-4.553733044696977, 0.49804109061298707, -7.0464564944227819, -1.840791513621308],
[-0.83084338896122745, -0.43439469601747493, -4.9305653378866143, -1.2823102544613527]]], [[[-0.74612019769311644,
-2.8609369043905408, -4.542597607847612, -3.7269182378522592], [-5.9158668751936148, -8.5811968066770881,
-1.8717775370272092, -5.1936977784481213], [-6.2973240104558732, -0.39261025846176612, -3.1849805513902769,
-0.30856426958445482]], [[-4.8998227094309055, -3.5498670530165466, -6.8546615379105047, -0.67212150315726138],
[-7.9469373051384116, -0.306764496474214, -7.0758991746099111, -5.1900948687459909], [-1.6865641149686867,
0.8622113075167519, 0.22930603944946082, -2.8416267020685204]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([2.6649927252905226, 0.29496968217893382]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(1.03366663195)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([1.6313260933372291, -0.73869694977435962]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([3.9090880537794526, -3.9706193840215942]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-3.7233870114697742, 0.99043840493200186])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([7.6324750652492268, -4.9610577889535961]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[2.8033126273843685, 0.51509190965393792, 3.931306976936968, -3.3823534090429486,
-2.3486719525293087], [-2.9837425664154784, -2.4457160287299686, 3.8981965382683743, -0.89609359902144714,
4.1620406111464288], [3.6868893591462246, -2.9993029597001462, 1.8283120616948665, -2.0195573949932277,
-2.1640627499057361], [-2.9723279323425489, -4.8559061533246624, -1.0130455282709172, -3.7833351321644395,
3.514692525422209]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(4.86937457463)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-2.0660619472497519, -4.3542826649801825, -0.93806759769715242, -8.2517279836770694,
-7.2180465271634286], [-7.8531171410495988, -7.315090603364089, -0.97117803636574607, -5.7654681736555675,
-0.70733396348769162], [-1.1824852154878958, -7.8686775343342665, -3.0410625129392539, -6.8889319696273486,
-7.0334373245398565], [-7.8417025069766693, -9.7252807279587827, -5.8824201029050371, -8.6527097067985608,
-1.3546820492119114]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-1.1140360715186182, -1.5235600156934481, 4.3075103934286023, 4.6800377743432158,
-3.2505150436972521], [0.39123458636258768, 0.41088806870879768, -2.9614108446790501, 1.1049238977643405,
0.92166667279843395], [0.54565864417397059, -4.8476249672143004, 4.9444652981547943, 4.0252126389168215,
-3.9123423425216322], [-3.6777596228844844, -3.4408972758983558, 2.7718180074050611, -0.3997152204895924,
-0.16573647825956073]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-2.4209487163246299, 1.3152643083131128, -0.71046464711788015, 0.21557543046364458,
-2.202065459251934], [-3.9101544501984198, -2.8682151089642827, 2.7125251197023488, 1.4173123031722534,
2.7246295240806209], [-1.5744991442525436, 3.0598215212654001, 0.63494427405471487, -4.906149376046594,
-1.6839564426436748], [4.0729555430880922, -0.83371622418680769, 0.46337987461630981, 4.0014755703742395,
-2.1103899940006032]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.3069126448060118, -2.8388243240065609, 5.0179750405464825, 4.4644623438795712,
-1.0484495844453181], [4.301389036561007, 3.2791031776730803, -5.6739359643813989, -0.31238840540791291,
-1.8029628512821869], [2.1201577884265141, -7.9074464884797004, 4.3095210241000794, 8.9313620149634154,
-2.2283858998779573], [-7.7507151659725766, -2.6071810517115481, 2.3084381327887513, -4.4011907908638319,
1.9446535157410425]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-2.6064326776506652, 4.9989076052590633], [-3.0068821433777249, -3.1193113732509516]],
[[-1.3190483681618739, 3.9479827067009108], [1.0954417889014865, 4.6359051697534426]], [[-2.9778493741722056,
3.4845430816156977], [1.7569072943914552, 1.1616150547614428]], [[-0.91210869485198565, -1.3406976214361355],
[3.2217649968914159, -2.662260898242006]], [[4.1697693146337542, -1.1741423631833072], [-4.9803850608859115,
1.2700647554700222]], [[4.6074170359664368, 1.453706456526124], [0.20949339688511692,
3.0091215511346796]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-1.04145599079)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.5649766868561219, 6.0403635960536066], [-1.9654261525831815, -2.0778553824564083]],
[[-0.27759237736733056, 4.9894386974954541], [2.1368977796960298, 5.6773611605479859]], [[-1.9363933833776623,
4.525999072410241], [2.7983632851859985, 2.2030710455559861]], [[0.12934729594255767, -0.29924163064159215],
[4.2632209876859593, -1.6208049074474626]], [[5.2112253054282975, -0.13268637238876391], [-3.9389290700913682,
2.3115207462645655]], [[5.6488730267609801, 2.4951624473206673], [1.2509493876796602,
4.0505775419292229]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[2.0075159970537113, 4.417162011434554], [0.71949384400506577, 1.0783048900035652]],
[[4.7614254606302335, -2.0888542276996978], [-3.5997702799671547, 4.2825487871951644]], [[-0.39389734575197544,
1.3283252585178928], [3.6919455158435834, -0.76277259642421402]], [[-4.4972180700076887, -3.7983795355307128],
[-0.26779668046970784, -0.79380221724008582]], [[-2.0572521505738273, -1.5154686544559368], [4.0972713376059851,
4.5986089620495108]], [[-1.3971821196462377, 0.16028646761807508], [-0.63755809097850857,
-3.3787710682197272]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[3.5103565349856751, 0.91526758558677379], [-3.7224124618951135, -0.27931399630195397]],
[[1.5813622936549105, 3.6172915696233972], [-1.2364412564258132, 0.16417768270487709]], [[0.64050559170122234,
4.6361361331624593], [-0.47839680540824325, -2.1615310941440589]], [[-0.85667930966756511, 1.669882578368358],
[0.22343162562157293, 0.80905790542025358]], [[-3.5873387244847543, 3.1163266795230058], [3.5553732672252671,
-4.6758779472194405]], [[3.6742958529176484, 0.58762359541383802], [1.5778519953325496, -0.39731537378910975]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.5028405379319638, 3.5018944258477802], [4.4419063059001793, 1.3576188863055192]],
[[3.180063166975323, -5.7061457973230949], [-2.3633290235413416, 4.1183711044902873]], [[-1.0344029374531978,
-3.3078108746445665], [4.1703423212518267, 1.3987584977198448]], [[-3.6405387603401236, -5.4682621138990708],
[-0.49122830609128076, -1.6028601226603394]], [[1.5300865739109271, -4.6317953339789426], [0.54189807038071791,
9.2744869092689513]], [[-5.0714779725638861, -0.42733712779576294], [-2.2154100863110582,
-2.9814556944306174]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[0.66483074145605592, 2.9129070748039982, -1.8655842911981346, -1.098354904466996],
[1.7426470733136448, -2.4896761957460898, 4.3864323453867851, -4.0781460331955177], [-0.62183708580819008,
-2.6186592235582786, -1.8750164189422014, -3.9631241880095969]], [[4.0419620323350909, 0.15536839603964836,
1.9771157591398101, -2.6101097405194453], [-4.7364297803535704, 1.8318126417179714, 3.2354822684907454,
2.2507758179659376], [-4.8699934080808029, -0.35744120243411981, 4.0908957400805122, -3.8440017446794084]]],
[[[4.5466344627836612, -2.8174576749848423, -0.32339288977492142, -3.3368918944053516], [3.3311423168153738,
-1.2448667289851647, -0.66737673743075376, -3.9953617725851598], [-4.8878412407428931, 3.1347720870691358,
-2.4390985397355847, -3.5615840737730475]], [[-3.7978882365989697, 4.345238312451805, 2.8310129832366435,
2.8564779239624674], [-0.85025481289091864, -4.3757742754757345, 3.5451710843902031, -2.5068001174158816],
[2.6943798866386315, 2.2746017608025317, -4.2655778273063607, 0.97165631163417387]]], [[[-2.9330039029788955,
4.3910413333213238, 2.5513441899802833, -3.8678703253194402], [-2.6748516851594308, -3.8887038302549062,
1.2485088138696518, -3.9629424578182251], [-0.38166273681210328, 3.82781593241344, -4.1817331752844087,
4.682478964767725]], [[-0.85849290617372809, -0.49338756563096275, -1.0480256440941615, -0.51008618582467946],
[-0.26820315453886501, 4.8354933917592806, 2.9555158912003154, -2.4766421456452479], [2.5098219987182944,
3.6215601735655589, -4.4497307132070123, -3.9295385075107028]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-2.59361652138)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[3.2584472628375467, 5.506523596185489, 0.72803223018335617, 1.4952616169144948],
[4.3362635946951356, 0.10394032563540101, 6.9800488667682759, -1.4845295118140269], [1.9717794355733007,
-0.025042702176787834, 0.7186001024392894, -1.3695076666281061]], [[6.6355785537165817, 2.7489849174211392,
4.5707322805213009, -0.01649321913795454], [-2.1428132589720796, 4.4254291630994622, 5.8290987898722362,
4.8443923393474284], [-2.2763768866993122, 2.236175318947371, 6.6845122614620029, -1.2503852232979176]]],
[[[7.140250984165152, -0.22384115360335155, 2.2702236316065694, -0.74327537302386082], [5.9247588381968646,
1.3487497923963261, 1.926239783950737, -1.401745251203669], [-2.2942247193614023, 5.7283886084506266,
0.15451798164590613, -0.96796755239155674]], [[-1.2042717152174789, 6.9388548338332958, 5.4246295046181343,
5.4500944453439581], [1.7433617084905721, -1.7821577540942437, 6.1387876057716939, 0.08681640396560919],
[5.2879964080201223, 4.8682182821840225, -1.6719613059248699, 3.5652728330156647]]], [[[-0.33938738159740467,
6.9846578547028146, 5.1449607113617741, -1.2742538039379494], [-0.081235163777940045, -1.2950873088734154,
3.8421253352511426, -1.3693259364367343], [2.2119537845693875, 6.4214324537949308, -1.5881166539029179,
7.2760954861492158]], [[1.7351236152077627, 2.100228955750528, 1.5455908772873292, 2.0835303355568113],
[2.3254133668426258, 7.4291099131407714, 5.5491324125818062, 0.11697437573624292], [5.1034385200997852,
6.2151766949470497, -1.8561141918255215, -1.335921986129212]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[2.140332416756844, -4.5756565160935745, 1.0268217328307561, 1.594533973931731],
[4.1426026647673879, 0.1548614651600202, 3.351820863446946, 0.54777524679756073], [-4.6470169243406527,
-3.4101935702258368, 1.3604597013400213, -4.3236653508957374]], [[2.3543066928954612, 1.6355558219698443,
3.8590758340122093, 0.055467084597328409], [1.3949738751098479, -2.9042097100731445, 2.1331143130237962,
-0.45715627400394165], [3.9505052117900146, -4.8644226435153097, 0.13641466419900183, 0.92434447564323374]]],
[[[-4.2036478385109302, -2.2096856472681958, -3.309442061812593, -0.17761420723311439], [-4.5417481392819026,
3.354117107537796, 2.9925164896060084, 4.231145636082223], [-4.3165407391400308, -0.16204594013147311,
-1.5308101185053733, 3.7017204822457384]], [[2.4648028362561725, 0.43817614121240833, -4.4908194091317366,
-0.081928750874263656], [-3.4087689978816016, 4.259133980931324, -4.2850896710829334, 4.6395735766216326],
[-1.3584480043808989, -4.7738821023855085, -1.2617431337636842, -1.2598313032270116]]], [[[2.2708892792624855,
1.9132737394453327, -0.50215367058696003, 0.19108419265161469], [-2.0796597802531669, 1.1505151966811367,
1.2957662425378791, -1.5883201097665802], [-1.7035021892623838, 4.8639671345493021, 3.1243484697100534,
0.47610495992410051]], [[-4.0444287366693015, -1.3614006776767349, -0.18268931922481002, 4.8063591217845332],
[3.1407426206783704, 2.8940879164962441, -4.9664997014592807, 1.6951588068340158], [-3.895479459710558,
1.7220903215355694, -3.7165673657855267, 3.1903385713544257]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-4.3482304868754991, -1.2480666735558845, 0.43538858115159051, -2.0858236027245205],
[-2.442305699452354, 2.0213192586154003, -2.5262404161243679, -4.458062700052194], [0.26228138879138641,
-2.6430658161459242, -4.7246503759525602, 4.2538788761081854]], [[-1.6124403577544308, -1.8284497197976037,
-3.0160374139385002, 2.7523938918136759], [1.4437250527651582, -2.7814473787336489, 3.5116683735594361,
-3.9808640616716562], [1.7054962689298705, 4.7974185413341068, 1.9447068850818283, -1.2797130952071156]]],
[[[3.7642823106611107, 0.11145650212965919, -0.096799862214571597, 2.0215787533002523], [0.26390717935294816,
0.12612295721321498, 4.0275730341758482, -1.2268861937462172], [-2.947926663434548, -1.4514539315574626,
2.4550945474164232, -2.7897655841602651]], [[-1.5947829088079746, 0.80620330852535815, -4.5614285986030234,
-1.9102368071164841], [2.0807019362652692, -4.099640999530064, -1.8395330667711352, -4.6367501410986929],
[-2.5162327168837786, 4.6954385782651951, -2.1576821461704854, -1.62194811763983]]], [[[0.06729391952569852,
-0.57919376543293488, -3.1838952254737416, 1.7056529660452817], [3.6116233555564143, 0.81964000588296315,
-0.16440769780998377, 0.079355513141521783], [2.9805073823987431, 1.3188532056435962, 3.4153481616516537,
-2.5138710663982189]], [[2.8884594089569315, 1.1351683507610142, -0.68804270946144719, -4.7325886514124882],
[1.1204800401276476, 0.55566378590737031, 0.94240513232859335, 2.9610440134171334], [-2.6222587774463815,
-4.4048348584786705, -0.29650368246657699, -1.0078523107846902]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[6.4885629036323431, -3.32758984253769, 0.59143315167916555, 3.6803575766562515],
[6.5849083642197419, -1.8664577934553801, 5.8780612795713143, 5.0058379468497547], [-4.9092983131320391,
-0.76712775407991263, 6.0851100772925815, -8.5775442270039228]], [[3.9667470506498921, 3.464005541767448,
6.8751132479507095, -2.6969268072163475], [-0.048751177655310229, -0.12276233133949566, -1.3785540605356399,
3.5237077876677145], [2.2450089428601441, -9.6618411848494166, -1.8082922208828265, 2.2040575708503494]]],
[[[-7.9679301491720409, -2.321142149397855, -3.2126421995980214, -2.1991929605333667], [-4.8056553186348507,
3.227994150324581, -1.0350565445698399, 5.4580318298284407], [-1.3686140757054828, 1.2894079914259895,
-3.9859046659217965, 6.4914860664060035]], [[4.0595857450641475, -0.36802716731294982, 0.070609189471286804,
1.8283080562422205], [-5.4894709341468708, 8.3587749804613871, -2.4455566043117982, 9.2763237177203255],
[1.1577847125028797, -9.4693206806507035, 0.89593901240680118, 0.3621168144128184]]], [[[2.203595359736787,
2.4924675048782676, 2.6817415548867816, -1.514568773393667], [-5.6912831358095808, 0.33087519079817351,
1.4601739403478629, -1.667675622908102], [-4.684009571661127, 3.5451139289057059, -0.29099969194160025,
2.9899760263223194]], [[-6.932888145626233, -2.4965690284377491, 0.50535339023663717, 9.5389477731970214],
[2.0202625805507228, 2.3384241305888738, -5.908904833787874, -1.2658852065831177], [-1.2732206822641765,
6.1269251800142399, -3.4200636833189497, 4.1981908821391158]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
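# taggedData cases: arg0 carries a default value plus a tag-1 value set via
# setTaggedValue(1, ...), and the reference mirrors that structure, so each
# test effectively checks the substituted result against both the default
# and the tagged component of the Data object.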
def test_sub_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(-2.29417952191,self.functionspace)
arg0.setTaggedValue(1,-4.27612309963)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-2.86386679086)
sub=res.substitute({arg1:s1})
ref=Data(0.569687268944,self.functionspace)
ref.setTaggedValue(1,-1.41225630877)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(-4.72691427991,self.functionspace)
arg0.setTaggedValue(1,0.483106242273)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-0.58516003749737244, 2.93231182282255])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-4.1417542424175267, -7.6592261027374491]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0682662797700972, -2.4492055805498252]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(4.84060376911,self.functionspace)
arg0.setTaggedValue(1,-3.32867505476)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[3.5332516865172998, 4.2256878903288939, -4.6404295927681405, 4.9721874322243114,
-1.5545932240349902], [0.40603544670242542, -2.879718425724147, -2.1385047584627337, 4.6127992237598132,
0.57646645021785048], [-2.6334801212800754, -2.3655947826469701, 0.48086858542515643, 1.0360291664664301,
-3.4378490059536082], [-0.23853194944872236, -2.0363663305583768, -2.3289186751171798, 3.5102407359843486,
4.1303419895739388]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.3073520825884426, 0.6149158787768485, 9.4810333618738838, -0.13158366311856895,
6.3951969931407326], [4.434568322403317, 7.7203221948298895, 6.9791085275684761, 0.2278045453459292,
4.2641373188878919], [7.4740838903858178, 7.2061985517527125, 4.359735183680586, 3.8045746026393124,
8.2784527750593497], [5.0791357185544648, 6.8769700996641188, 7.1695224442229222, 1.3303630331213938,
0.71026177953180358]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-6.8619267412736988, -7.5543629450852929, 1.3117545380117415,
-8.3008624869807104, -1.7740818307214088], [-3.7347105014588244, -0.44895662903225197, -1.1901702962936653,
-7.9414742785162122, -3.9051415049742495], [-0.69519493347632366, -0.96308027210942893, -3.8095436401815554,
-4.3647042212228291, 0.10917395119720918], [-3.0901431053076767, -1.2923087241980222, -0.99975637963921926,
-6.8389157907407476, -7.4590170443303379]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(-3.20552188916,self.functionspace)
arg0.setTaggedValue(1,-0.473083670166)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[0.71230320805011704, -3.008236723891188], [0.81066003773158002, -3.6043239509733382]],
[[3.691034498943317, -3.3919882986743777], [0.84551364067512935, 3.3207859438709946]], [[0.41963337446652105,
-3.6038224020133991], [-2.3537235378574151, -3.7120927558232997]], [[-3.4588851001838727, -0.31880183563871789],
[-1.3379489058063267, -3.9118810181560226]], [[4.4984539881701195, -3.2158956295350851], [1.5013508852420685,
2.8717656529358955]], [[-0.13701019263353231, -3.1176264463626078], [-1.67955120335195, 4.317481449568719]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-3.917825097207726, -0.19728516526642093], [-4.016181926889189, 0.3988020618157293]],
[[-6.896556388100926, 0.18646640951676874], [-4.0510355298327383, -6.5263078330286035]], [[-3.62515526362413,
0.39830051285579016], [-0.85179835130019388, 0.50657086666569073]], [[0.2533632110262638, -2.886720053518891],
[-1.8675729833512822, 0.70635912899841369]], [[-7.7039758773277285, 0.010373740377476182], [-4.7068727743996774,
-6.0772875420935044]], [[-3.0685116965240766, -0.087895442795001166], [-1.525970685805659,
-7.523003338726328]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-1.1853868782160886, 2.5351530537252165], [-1.2837437078975515,
3.1312402808073667]], [[-4.1641181691092886, 2.9189046285084062], [-1.3185973108411009, -3.7938696140369661]],
[[-0.89271704463249257, 3.1307387318474276], [1.8806398676914435, 3.2390090856573281]], [[2.9858014300179012,
-0.15428183452725364], [0.86486523564035522, 3.4387973479900511]], [[-4.9715376583360911, 2.7428119593691136],
[-1.97443455540804, -3.344849323101867]], [[-0.33607347753243921, 2.6445427761966362], [1.2064675331859784,
-4.7905651197346906]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(-0.215341183726,self.functionspace)
arg0.setTaggedValue(1,-3.01917111711)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[3.1718058337950783, -4.3218518167555349, 4.7360170033398816, 2.6415781893387447],
[1.7953624357215787, 0.37239845986582054, 0.85595953231170441, -4.2093909477304852], [-4.0724848735753412,
-2.3789549933876364, 3.8266481046469991, -4.4686983670793881]], [[-1.3807814097985793, -0.9345570079736385,
3.2111606830229267, 2.5248569160832579], [-0.19847478717542089, 3.6200277417416071, -1.3367301493578787,
-1.9914051287776093], [4.2384277387383236, -3.1625190831895669, -4.8267032630177118, -3.7590986361039294]]],
[[[-0.96721285038350846, 0.23717549644533698, -2.0558971771798862, -2.1889488119398925], [2.1163450477817447,
-4.308535473047935, 0.96468545582662735, 0.58036767508710252], [-0.26889479983427034, -4.6749066439752021,
-2.6908936581627731, 3.3090528029139286]], [[1.0683391958055246, -4.3705975019062535, 4.6959723711804546,
-0.58815635047014858], [-1.7921642772643898, 2.8079866307247423, 4.5837878995413348, -3.6656523242301429],
[2.1083853748587442, -0.44280454111162726, -2.5427523262585563, 3.9551312168955626]]], [[[4.0479839543530591,
1.694708528108122, -1.8081650371476021, 2.5627212563151982], [2.9443513555348222, -3.4330381296191126,
-2.3471872352829837, 2.9291777099369405], [0.92208424820838264, -1.7857214370413055, 3.2638247404414695,
3.3713981402987798]], [[-2.3853121535462418, 2.1417428055374232, 3.1558224539661612, -4.4802179321245248],
[-3.0197245205703069, 2.7624146301708477, -4.6790033997765104, -4.0453165901737584], [4.8295161047601614,
-3.5764718373510842, 4.356981591617421, -4.7034098127513264]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-3.3871470175211567, 4.1065106330294565, -4.95135818706596, -2.856919373064823],
[-2.0107036194476571, -0.5877396435918989, -1.0713007160377828, 3.9940497640044068], [3.8571436898492628,
2.163613809661558, -4.0419892883730775, 4.2533571833533097]], [[1.165440226072501, 0.71921582424756014,
-3.426501866749005, -2.7401980998093363], [-0.01686639655065747, -3.8353689254676855, 1.1213889656318003,
1.776063945051531], [-4.4537689224644019, 2.9471778994634885, 4.6113620792916334, 3.543757452377851]]],
[[[0.7518716666574301, -0.45251668017141533, 1.8405559934538078, 1.9736076282138142], [-2.3316862315078231,
4.0931942893218567, -1.1800266395527057, -0.79570885881318087], [0.053553616108191981, 4.4595654602491237,
2.4755524744366948, -3.5243939866400069]], [[-1.283680379531603, 4.1552563181801752, -4.911313554906533,
0.37281516674407023], [1.5768230935383114, -3.0233278144508207, -4.7991290832674132, 3.4503111405040645],
[-2.3237265585848226, 0.2274633573855489, 2.3274111425324779, -4.1704724006216409]]], [[[-4.2633251380791375,
-1.9100497118342004, 1.5928238534215238, -2.7780624400412766], [-3.1596925392609005, 3.2176969458930342,
2.1318460515569053, -3.1445188936630188], [-1.137425431934461, 1.5703802533152271, -3.4791659241675479,
-3.5867393240248582]], [[2.1699709698201635, -2.3570839892635016, -3.3711636376922396, 4.2648767483984464],
[2.8043833368442286, -2.977755813896926, 4.463662216050432, 3.8299754064476801], [-5.0448572884862397,
3.3611306536250058, -4.5723227753434994, 4.4880686290252481]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-6.1909769509085075, 1.3026806996421056, -7.7551881204533109,
-5.6607493064521739], [-4.8145335528350079, -3.3915695769792498, -3.8751306494251336, 1.1902198306170559],
[1.0533137564619119, -0.64021612372579284, -6.8458192217604283, 1.4495272499659588]], [[-1.6383897073148499,
-2.0846141091397907, -6.2303318001363559, -5.5440280331966871], [-2.8206963299380083, -6.6391988588550364,
-1.6824409677555505, -1.0277659883358199], [-7.2575988558517528, 0.14334796607613765, 1.8075321459042826,
0.73992751899050013]]], [[[-2.0519582667299208, -3.2563466135587662, -0.96327393993354304, -0.83022230517353668],
[-5.1355161648951739, 1.2893643559345058, -3.9838565729400566, -3.5995387922005317], [-2.7502763172791589,
1.6557355268617728, -0.32827745895065608, -6.3282239200273578]], [[-4.0875103129189538, 1.3514263847928243,
-7.7151434882938839, -2.4310147666432806], [-1.2270068398490395, -5.8271577478381715, -7.602959016654764,
0.64648120711671364], [-5.1275564919721734, -2.576366576001802, -0.47641879085487293, -6.9743023340089918]]],
[[[-7.0671550714664884, -4.7138796452215512, -1.2110060799658271, -5.5818923734286274], [-5.9635224726482514,
0.41386701250568336, -0.67198388183044555, -5.9483488270503697], [-3.9412553653218119, -1.2334496800721237,
-6.2829958575548988, -6.390569257412209]], [[-0.63385896356718741, -5.1609139226508525, -6.1749935710795905,
1.4610468150110956], [0.0005534034568777102, -5.7815857472842769, 1.6598322826630811, 1.0261454730603292],
[-7.8486872218735906, 0.55730072023765498, -7.3761527087308503, 1.6842386956378972]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([3.3101673523710691, 0.048409361416743124]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([0.70887806236646611, -0.73932065177372408]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(1.15960287006)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([2.1505644823090515, -1.1111935086452744]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.45072480769555145, -1.8989235218357416]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([-2.0708546339036071, 2.2714034647505121]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([-0.16265022615439584, -0.29272834777410406]))
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([1.8495632665872739, -2.2808524667130694])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-3.920417900490881, 4.5522559314635815]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-2.0122134927416697, 1.9881241189389653]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[4.703380807076492, -4.2567944639019304, -2.0784707905046593, 0.18023637488621791,
1.1164321428411501], [3.3809585074696322, 1.5795463086222137, 1.5300027430790495, -1.6695215658775489,
-4.9671698822372887], [-0.56875186129757704, -0.88988163011215704, 1.0953422249288387, 1.2629450835517639,
1.9829321534877584], [-2.3470243950738103, -1.5345245349366401, 1.7913793425402638, 3.2778179482022125,
3.2743088989127749]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[2.1331140495285128, 4.902243346193929, -3.8569193535703947,
-1.2051025219030698, 4.8526791592750644], [-1.9285295160668192, -2.2715983725035862, -1.6280809153232632,
0.63571110979312273, -4.5616322454088643], [1.1933837591252878, -2.4657544917793928, 3.8511059475300904,
-3.0018611957635444, 3.560382804940847], [-4.284584247208282, -4.3366343606789348, 3.6048395763720524,
-2.2301793774115106, 4.6397261587379131]]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(0.0560012612314)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[4.6473795458450571, -4.3127957251333653, -2.1344720517360942, 0.12423511365478301,
1.0604308816097152], [3.3249572462381973, 1.5235450473907788, 1.4740014818476146, -1.7255228271089837,
-5.0231711434687236], [-0.62475312252901194, -0.94588289134359194, 1.0393409636974038, 1.206943822320329,
1.9269308922563235], [-2.4030256563052452, -1.590525796168075, 1.7353780813088289, 3.2218166869707776,
3.21830763768134]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[2.0771127882970779, 4.8462420849624941, -3.9129206148018296,
-1.2611037831345047, 4.7966778980436295], [-1.9845307772982541, -2.3275996337350211, -1.6840821765546981,
0.57970984856168783, -4.6176335066402991], [1.1373824978938529, -2.5217557530108277, 3.7951046862986555,
-3.0578624569949793, 3.5043815437094121], [-4.3405855084397169, -4.3926356219103697, 3.5488383151406175,
-2.2861806386429455, 4.5837248975064782]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[0.044613582775737015, -0.22965054883260905, -3.3954728255423361, -0.043404784226975579,
-0.81018025865095922], [4.0980455142640473, 3.3299876326958326, 4.4694158188546833, 0.047800124529065791,
-4.1128886475115927], [-0.86793714814288414, 3.7852706993586231, 2.8168181178475837, -2.6081900317073039,
1.795227525921204], [-2.7964436060814792, 2.46599228887926, -4.3894587372918519, -3.0809581135280197,
4.5629513161933648]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[0.18467263707487369, -2.906541382403959, -4.2471361917218733,
1.7478696798949915, -2.0555035204044225], [-4.1703824796767011, -0.58145273211245829, -1.3034416354534684,
-4.4238643252257699, -3.0019960418182654], [-0.011560599410600503, 4.5614736908410478, -4.1865499712522745,
0.41611035316936196, 1.4719370557053075], [3.3285499812876207, 4.2147545548351992, 3.8796865015190463,
-2.8665673368928459, 3.8754754018195001]]))
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-0.34040680852948757, 0.51480179015857086, 2.6579250902566542, -3.8908104282358877,
-1.0766494604779266], [-1.7785348143550985, 1.7875285221080928, -0.26464821727786259, 3.7856697734154743,
0.14935084548977784], [1.6454427368239299, -3.0878902261983701, 2.1577262475041596, -3.540342914142153,
2.8529020416879671], [2.8849125795379305, -3.1409630887157123, -0.30215664293811351, 3.5493007526176896,
0.27226779139430857]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[0.38502039130522459, -0.74445233899117991, -6.0533979157989908, 3.8474056440089122,
0.26646920182696743], [5.8765803286191458, 1.5424591105877399, 4.7340640361325459, -3.7378696488864085,
-4.2622394930013705], [-2.5133798849668141, 6.8731609255569932, 0.65909187034342409, 0.93215288243484906,
-1.0576745157667631], [-5.6813561856194097, 5.6069553775949723, -4.0873020943537384, -6.6302588661457094,
4.2906835247990562]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.52507944560436126, -3.4213431725625298, -6.9050612819785275,
5.6386801081308793, -0.97885405992649588], [-2.3918476653216025, -2.3689812542205511, -1.0387934181756058,
-8.2095340986412442, -3.1513468873080432], [-1.6570033362345304, 7.6493639170394179, -6.3442762187564341,
3.9564532673115149, -1.3809649859826596], [0.44363740174969024, 7.3557176435509115, 4.1818431444571598,
-6.4158680895105356, 3.6032076104251916]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-0.70323441272603926, -1.4205742401701604], [-3.6004008923276585, 4.1739347100888349]],
[[-2.7687391296703767, -0.96114141211843496], [0.45711266950319906, 0.36713165606152121]], [[3.8726070188081287,
2.6611494194452137], [-0.28060302358441547, 1.0399275995737964]], [[2.5912385881777, -0.12172669528696911],
[1.831517522951442, -4.9891623764024926]], [[3.8572507842255241, 2.9719918728052663], [0.42882676434271261,
-1.4826468418372341]], [[0.16110396579090835, 4.8052378752678955], [2.4890225545274554,
-1.4594734254395068]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[3.4601998637619467, 3.5105292543746671], [-1.9715134513187751,
1.6897677346566677]], [[0.99895689216195205, 3.7908023259957879], [-2.9811497902134496, 0.46336396583979944]],
[[-2.0979181014824011, 0.68992077008736707], [4.5817275596392033, 3.1112543881649586]], [[-1.0666850119171398,
-3.7136243224538679], [-2.1842168128700248, -0.60998709362389292]], [[-1.0817587775668578, 1.1357523207967555],
[0.72114300996433212, 2.0871085948686607]], [[2.6196090777455074, -4.8403131105182826], [4.4462612480444346,
2.6275786734235638]]]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(3.40075496466)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-4.1039893773891789, -4.8213292048333001], [-7.0011558569907981, 0.77317974542569523]],
[[-6.1694940943335164, -4.3618963767815746], [-2.9436422951599406, -3.0336233086016184]], [[0.4718520541449891,
-0.73960554521792599], [-3.6813579882475551, -2.3608273650893432]], [[-0.80951637648543961, -3.5224816599501088],
[-1.5692374417116977, -8.3899173410656331]], [[0.4564958195623845, -0.42876309185787331], [-2.971928200320427,
-4.8834018065003733]], [[-3.2396509988722313, 1.4044829106047558], [-0.91173241013568429,
-4.8602283901026464]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.059444899098807014, 0.10977428971152747], [-5.3722684159819147,
-1.7109872300064719]], [[-2.4017980725011876, 0.39004736133264828], [-6.3819047548765893, -2.9373909988233402]],
[[-5.4986730661455407, -2.7108341945757726], [1.1809725949760637, -0.28950057649818106]], [[-4.4674399765802795,
-7.1143792871170071], [-5.5849717775331644, -4.0107420582870326]], [[-4.4825137422299974, -2.2650026438663842],
[-2.6796119546988075, -1.3136463697944789]], [[-0.7811458869176322, -8.2410680751814223], [1.0455062833812949,
-0.77317629123957587]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-2.8893927498914151, -3.9495986710021471], [2.0674301637688552, -4.9323681378020368]],
[[-3.9365223323164567, -3.9166796931279513], [-2.1295831296849688, 0.049270642730291137]], [[1.1604521699930164,
-4.7263968957110194], [0.18403419227820805, -3.9919770732677948]], [[-4.4683480884742268, 3.1077188243660192],
[0.090355977211302729, -0.013539049772621325]], [[1.2239143556433882, 4.66468811676115], [4.6443599318212119,
2.902664355759085]], [[3.1499666861977964, 3.5678517696258449], [0.73557701807290599,
-4.1703133219986768]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[0.62745401025262382, 0.69024538347902542], [4.3685303267738433,
2.2109723240557235]], [[-0.7348498808881363, -2.7513236139357309], [2.5887407011037489, 4.1931952710033542]],
[[2.1336250254996258, -2.1610465999144091], [-4.054796877122568, 0.054975312915938268]], [[2.8778982280083021,
0.031841424972327559], [-1.6040852288365626, -0.14653197703489251]], [[1.0241081083490533, 2.0236436389548764],
[-4.7683548819587331, 0.81201234013234735]], [[-3.2923450240347405, 2.2531528995219965], [-3.594199051432386,
-1.9523442452177875]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[0.67454553417657603, 2.9833990689244789], [-3.9375622829117427, 0.0094498156860893801]],
[[2.1574617938010734, -0.48892733726965609], [0.62118276066421352, 0.99065918564407696]], [[1.7968244154456219,
-1.6314349433046926], [1.8612952961850224, 4.6630470176393288]], [[0.43763307675500052, 4.0271951272236688],
[-1.1711764825930993, -4.5547560714878275]], [[2.514477748308436, 3.7600620047710827], [1.5805136896170069,
2.4948517124974012]], [[-0.74781838229224817, -2.9876928953003903], [4.1339271192034222, 4.4719827170790509]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-3.5639382840679912, -6.932997739926626], [6.004992446680598, -4.9418179534881261]],
[[-6.0939841261175296, -3.4277523558582952], [-2.7507658903491823, -0.94138854291378582]], [[-0.63637224545260551,
-3.0949619524063268], [-1.6772611039068144, -8.6550240909071228]], [[-4.9059811652292273, -0.91947630285764959],
[1.261532459804402, 4.5412170217152061]], [[-1.2905633926650477, 0.90462611199006737], [3.063846242204205,
0.40781264326168376]], [[3.8977850684900446, 6.5555446649262352], [-3.3983501011305162,
-8.6422960390777277]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-0.047091523923952217, -2.2931536854454535], [8.3060926096855852,
2.2015225083696341]], [[-2.8923116746892097, -2.2623962766660748], [1.9675579404395354, 3.2025360853592773]],
[[0.33680061005400397, -0.52961165660971643], [-5.9160921733075904, -4.6080717047233906]], [[2.4402651512533016,
-3.9953537022513412], [-0.43290874624346332, 4.4082240944529349]], [[-1.4903696399593827, -1.7364183658162062],
[-6.34886857157574, -1.6828393723650539]], [[-2.5445266417424923, 5.2408457948223868], [-7.7281261706358082,
-6.4243269622968384]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[3.1002455029763922, 2.6515488300516923, -0.77582358496211956, -3.4443694355246803],
[-2.6599091620789581, -0.70044327546902529, -4.3223485855396966, 4.9338402947088049], [-4.5546987200991147,
-4.159833516760548, -1.2113818643763619, 1.341501344402797]], [[-0.99132126989665803, -3.81966827017445,
-1.5631671743562592, -2.9170370396917167], [0.94015514336519956, -4.5328623228274036, 2.5469993786586862,
4.5298447080413311], [-1.8826808741220304, -0.21100480137345734, -1.7750931594239239, -3.5343470478632764]]],
[[[-3.4624410933639691, 3.7419877938482422, -4.1641241285521557, -2.8763768520849711], [4.3838179808162643,
-0.076650368742670949, -2.2790272387608601, 1.4407514353417152], [-0.58059366739859364, 3.0282179950037378,
4.3946428646333242, -3.9361840734571896]], [[-0.40769305246403231, -0.93123230765280152, -3.5500981163613665,
-1.4382421516555786], [0.18862577968690264, 3.8234595158976035, 1.2783334948832605, -0.84599833008897818],
[-1.5452449895609535, -2.1285283532469434, 2.9517034908101669, -1.043778516582341]]], [[[2.5188074736534176,
4.926760464276164, -1.2494158315784532, -4.1847607799981805], [1.764772573553314, 4.6090994448443769,
-3.7864884573437072, 2.5743244083963681], [-0.44624416686502322, -0.44288726525437028, -2.5180469174818598,
-4.8009656021603]], [[-1.0967276921708047, -1.5639987059537273, -3.3122649580537331, -3.947879272385495],
[4.1267460589959857, -4.5801997177900287, 0.85366271506547697, -3.5573421152778972], [-4.7127368302025108,
-4.5592524679039892, -1.8586387462495613, -3.2614675219884837]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-3.6140016210408508, -4.1545999292001445, 4.9863169403898908,
-2.2007289242442383], [-2.3634275248295822, 1.4929955627211893, 1.1905831175627091, -3.1298255396253936],
[-0.78867439130174599, -2.5664248245819756, -1.882393556334109, -2.3300345925878529]], [[3.7578772846055983,
-1.9632657478837121, -1.3792653830852455, -0.23840250166856869], [-1.650781665029756, -3.2744446113480907,
-1.2541229166086589, -2.3471598629273149], [-1.939332795628903, 0.81542234976851624, 0.52422540705571663,
0.91808367692950554]]], [[[-3.0689349511345867, -4.8032602579819264, 3.769084882991141, -1.5864959564378189],
[-3.2063200431555905, -0.3347729502698602, 1.763270929850381, 0.65936335478094321], [-3.6143633139881959,
0.15424644431103118, 3.7156782910709154, -3.2826914978804203]], [[-0.091940996157960697, 2.5331247115220021,
3.4383904670893202, 0.77887041122794898], [4.2850997491436988, 3.3877021574758341, 3.9303516193668084,
0.97217787674818279], [-1.8219977615256742, 3.7582967180633755, -3.967674705101544, 3.2183851949652524]]],
[[[3.8000102844693906, -2.9266220460152672, 0.11901081743168795, -0.70455205529677301], [4.6787843021952913,
-3.2637583894745239, 4.6693989140352041, 2.042172937625808], [-2.9445501417858964, 0.36254085518902812,
2.8333171427728354, -2.7757509476245721]], [[3.8180860212706147, -3.4817247466262815, -3.2683613783585006,
-2.0706219843820262], [4.8065072235822566, 2.2788211866672707, 3.8562835841415382, -1.1633706258500731],
[2.652336823163191, -2.6060953909144513, 0.62089818312127321, -1.6242126976534612]]]]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-4.55573857649)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[7.6559840794689205, 7.2072874065442205, 3.7799149915304087, 1.1113691409678479],
[1.8958294144135701, 3.8552953010235029, 0.23338999095283164, 9.4895788712013331], [0.0010398563934135296,
0.3959050597319802, 3.3443567121161664, 5.8972399208953252]], [[3.5644173065958702, 0.73607030631807824,
2.992571402136269, 1.6387015368008115], [5.4958937198577278, 0.02287625366512458, 7.1027379551512144,
9.0855832845338593], [2.6730577023704978, 4.3447337751190709, 2.7806454170686044, 1.0213915286292519]]],
[[[1.0932974831285591, 8.2977263703407704, 0.39161444794037248, 1.6793617244075572], [8.9395565573087925,
4.4790882077498573, 2.2767113377316681, 5.9964900118342435], [3.9751449090939346, 7.583956571496266,
8.9503814411258524, 0.61955450303533866]], [[4.1480455240284959, 3.6245062688397267, 1.0056404601311617,
3.1174964248369497], [4.7443643561794309, 8.3791980923901317, 5.8340720713757888, 3.70974024640355],
[3.0104935869315748, 2.4272102232455848, 7.5074420673026951, 3.5119600599101872]]], [[[7.0745460501459458,
9.4824990407686922, 3.3063227449140751, 0.3709777964943477], [6.3205111500458422, 9.1648380213369052,
0.76925011914882102, 7.1300629848888963], [4.109494409627505, 4.1128513112381579, 2.0376916590106684,
-0.24522702566777177]], [[3.4590108843217235, 2.991739870538801, 1.2434736184387951, 0.60785930410703326],
[8.6824846354885139, -0.024461141297500433, 5.4094012915580052, 0.99839646121463099], [-0.15699825370998255,
-0.0035138914114609676, 2.697099830242967, 1.2942710545040446]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.94173695545167746, 0.40113864729238369, 9.542055516882419,
2.35500965224829], [2.192311051662946, 6.0487341392137175, 5.7463216940552373, 1.4259130368671347],
[3.7670641851907822, 1.9893137519105526, 2.6733450201584192, 2.2257039839046753]], [[8.3136158610981266,
2.5924728286088161, 3.1764731934072827, 4.3173360748239595], [2.9049569114627722, 1.2812939651444375,
3.3016156598838693, 2.2085787135652133], [2.6164057808636252, 5.3711609262610445, 5.0799639835482449,
5.4738222534220338]]], [[[1.4868036253579415, -0.24752168148939813, 8.3248234594836692, 2.9692426200547093],
[1.3494185333369377, 4.220965626222668, 6.3190095063429093, 5.2151019312734714], [0.94137526250433234,
4.7099850208035594, 8.2714168675634436, 1.273047078612108]], [[4.4637975803345675, 7.0888632880145304,
7.9941290435818484, 5.3346089877204772], [8.8408383256362271, 7.9434407339683624, 8.4860901958593367,
5.527916453240711], [2.7337408149668541, 8.3140352945559037, 0.58806387139098426, 7.7741237714577807]]],
[[[8.3557488609619188, 1.629116530477261, 4.6747493939242162, 3.8511865211957552], [9.2345228786878195,
1.2919801870180043, 9.2251374905277324, 6.5979115141183362], [1.6111884347066319, 4.9182794316815563,
7.3890557192653636, 1.7799876288679561]], [[8.3738245977631429, 1.0740138298662467, 1.2873771981340276,
2.4851165921105021], [9.3622458000747848, 6.834559763159799, 8.4120221606340664, 3.3923679506424551],
[7.2080753996557192, 1.9496431855780769, 5.1766367596138014, 2.931525878839067]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[2.1869721643026576, 0.35542091272423715, 2.5099944114031967, 4.7276012581949995],
[-0.23596027111215712, 3.2557128306673206, -2.4174678213407566, 4.9025765849007588], [3.4987602616867228,
-2.3969967727517094, 2.614715035832643, -3.9538109091356577]], [[0.54151166641114745, 4.3433313907072311,
-3.9824411189395126, 0.11193040884063787], [-4.3326960505433521, -2.6555021449849603, -1.6650005107909016,
-0.21278258756168267], [2.9438726263016104, 4.614591333740627, -1.4283352855346321, 4.195747529596801]]],
[[[0.4129039465707498, 0.25218586208094607, 4.2227877593235625, -3.8395686827717723], [-4.246422814789943,
-4.2708029152046789, -4.4791253262093615, 2.3703854064691221], [-0.32074671911367325, -4.0633264555676574,
-4.8034904727622223, 0.101245496731595]], [[3.3860052077100544, 4.4048456672981686, 3.3258905421337257,
-0.60591078242426555], [2.9574702297232829, 2.9390786518156196, 3.0627580449874809, -2.1902821038190523],
[1.2765769390449559, 4.5442832941192819, 0.47031486471564055, -3.2094801674304509]]], [[[1.4972627407797212,
-2.7514173987810633, 0.19744444113354387, 1.3720920976100972], [-3.147124860705004, -3.6707691951555885,
1.1521564952279704, -0.12493802519996233], [1.3717811158015873, -1.737983464544548, -2.5919544001996897,
-4.4195022009129206]], [[-3.5078213357756582, 1.5909514876001909, 3.932618549290213, 0.32844467348406869],
[-0.037083415286228494, 2.358949404615915, -3.7082781631298478, -4.9441324919087766], [1.219588665287433,
-2.1155364750524797, 2.3443039764677165, 4.1618790582351313]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[3.8216987557975131, -0.59039813916696193, -1.9474433412604117,
4.1666345075852202], [1.0033840403657788, -1.8365638623400207, -1.1472895447555285, 0.49043998461267968],
[1.525782098623524, 0.98710575843395354, 1.9521603305269073, 1.4982217977497818]], [[4.8105014981222372,
0.18255767851204219, 0.10092997041413909, 2.3610713615733667], [3.8639541584797801, 1.8455276769077198,
3.9278199867001007, 2.5501176762845867], [3.2925051662999447, 0.78129602184334157, -0.73105877010655362,
2.9378923845982694]]], [[[1.3162347911484948, -1.7534583809398363, -4.4745574675152744, 0.84388146264593455],
[-2.1398633576757309, 1.6224556269216279, 4.0151064679341637, 0.81646760002277574], [0.95506629968888479,
-3.384786519820715, 2.08961451298733, 1.4802214615087061]], [[2.5752388025402837, -2.7094797245847468,
-2.6808155024703106, -1.7780191613070642], [-0.58755728186204248, -4.3097624692690948, 3.6757907841395685,
-1.8312242243207608], [-3.7229135985460826, -1.5786991892133564, 2.6894504757052617, -0.48567336902160463]]],
[[[3.4562176552233623, -1.5291903913231595, 4.9276217294297595, -1.4641622460496571], [-3.9633150641051529,
-1.3895475276782743, -2.0928641563143735, 4.286214622292805], [-0.016872120519226819, -0.86571000346058913,
4.2635805792181465, 4.0351866281897113]], [[-1.973695982407413, -4.452260246087465, -2.5681734906597109,
3.0954829513656215], [2.6526834215550927, -4.3976717675273207, 2.0111485813735106, 2.7969396373439324],
[-0.72100288848623784, 1.4868693846138363, 2.3876845459322045, -3.759851286518614]]]]))
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-1.2326165508314046, 0.019536700697927678, 3.3313535404093759, -2.4782775769684271],
[3.9342491756801525, 1.2904741959913864, -2.7701975380199206, 2.4757520771582744], [2.5202328466158281,
-1.3683915774027189, 3.4678638218372768, -2.2884507446983129]], [[-4.9275394706777931, 4.7975831194456333,
1.7829898690658723, -0.96339421834763073], [-2.7923805247323799, -0.026981154987572253, 2.5136604629187271,
0.14658337947380495], [1.1254475424349959, 4.8000437885357261, 3.3479331374253167, 1.6298765760037002]]],
[[[-0.46473842692243572, 1.2430212762010644, -0.23618382206216726, -1.2230171932711418], [2.0127498669810855,
-0.31475870950595031, -0.20645609212011973, -4.9825089187683691], [-4.6108703987985988, -0.47963035537661725,
-3.1919702863790422, -3.9993603357626117]], [[3.8402219409685951, 3.04406815317755, 4.7640360318949195,
1.5279973254325983], [-4.9716807317737235, -3.4706635767559693, -1.2581696190523903, -2.591452040312936],
[1.6191001515432157, -3.5419762128533741, 0.92904425652178801, 4.6966930122512043]]], [[[-2.4787875268428614,
4.8717538415307775, 3.6264063974305554, 2.0645154974740256], [-4.5070489852671329, 2.3540394703493703,
3.2007816723140134, -0.44359603196672026], [2.5406621078154732, 3.6651768892659895, -2.7039262200534422,
-1.9309627063916244]], [[-0.037762488646412962, -4.6825147640959859, -3.1180187992817956, -0.3407644296025687],
[-1.6601757648009907, -1.0174825465103088, 0.060955158106047236, 1.2341204474061849], [-0.24621306712976931,
-1.3620636349151272, -0.12322079758969373, 2.3717593913603183]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[3.4195887151340623, 0.33588421202630947, -0.82135912900617924, 7.2058788351634266],
[-4.1702094467923096, 1.9652386346759343, 0.35272971667916408, 2.4268245077424844], [0.97852741507089469,
-1.0286051953489905, -0.85314878600463384, -1.6653601644373448]], [[5.4690511370889405, -0.45425172873840225,
-5.7654309880053844, 1.0753246271882686], [-1.5403155258109722, -2.628520989997388, -4.1786609737096292,
-0.35936596703548762], [1.8184250838666145, -0.18545245479509909, -4.7762684229599488, 2.5658709535931008]]],
[[[0.87764237349318552, -0.99083541412011833, 4.4589715813857298, -2.6165514895006305], [-6.2591726817710285,
-3.9560442056987286, -4.2726692340892418, 7.3528943252374912], [4.2901236796849256, -3.5836961001910401,
-1.6115201863831801, 4.1006058324942067]], [[-0.45421673325854073, 1.3607775141206186, -1.4381454897611938,
-2.1339081078568638], [7.9291509614970064, 6.4097422285715888, 4.3209276640398713, 0.40116993649388366],
[-0.34252321249825979, 8.0862595069726559, -0.45872939180614747, -7.9061731796816552]]], [[[3.9760502676225826,
-7.6231712403118408, -3.4289619562970115, -0.69242339986392842], [1.359924124562129, -6.0248086655049589,
-2.0486251770860431, 0.31865800676675793], [-1.1688809920138858, -5.4031603538105379, 0.11197181985375249,
-2.4885394945212962]], [[-3.4700588471292453, 6.2734662516961768, 7.0506373485720086, 0.66920910308663739],
[1.6230923495147622, 3.3764319511262237, -3.7692333212358951, -6.1782529393149614], [1.4658017324172024,
-0.7534728401373525, 2.4675247740574102, 1.7901196668748129]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[5.0543153066289177, -0.60993483986488961, -5.2787968816697877,
6.6449120845536473], [-2.9308651353143738, -3.127038058331407, 1.6229079932643922, -1.9853120925455947],
[-0.99445074799230415, 2.3554973358366724, -1.5157034913103695, 3.7866725424480947]], [[9.7380409688000302,
-4.6150254409335911, -1.6820598986517332, 3.3244655799209974], [6.6563346832121599, 1.872508831895292,
1.4141595237813736, 2.4035342968107818], [2.1670576238649488, -4.0187477666923845, -4.0789919075318704,
1.3080158085945692]]], [[[1.7809732180709306, -2.9964796571409007, -4.2383736454531071, 2.0668986559170763],
[-4.1526132246568164, 1.9372143364275782, 4.2215625600542834, 5.7989765187911448], [5.5659366984874836,
-2.9051561644440977, 5.2815847993663727, 5.4795817972713179]], [[-1.2649831384283114, -5.7535478777622968,
-7.4448515343652302, -3.3060164867396624], [4.384123449911681, -0.83909889251312553, 4.9339604031919588,
0.76022781599217515], [-5.3420137500892988, 1.9632770236400177, 1.7604062191834737, -5.1823663812728089]]],
[[[5.9350051820662237, -6.400944232853937, 1.3012153319992041, -3.5286777435236827], [0.54373392116198005,
-3.7435869980276446, -5.293645828628387, 4.7298106542595253], [-2.5575342283347, -4.5308868927265786,
6.9675067992715887, 5.9661493345813357]], [[-1.935933493761, 0.23025451800852093, 0.54984530862208469,
3.4362473809681902], [4.3128591863560839, -3.3801892210170119, 1.9501934232674634, 1.5628191899377475],
[-0.47478982135646852, 2.8489330195289635, 2.5109053435218982, -6.1316106778789319]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(2.42413566075)+(1.-msk_arg0)*(2.73592046896)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(0.0730314190245)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*(2.35110424173)+(1.-msk_ref)*(2.66288904994)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-2.38585027921)+(1.-msk_arg0)*(-2.14546935212)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([1.0449404678521192, -2.9654578889240057])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-3.4307907470591283,
0.57960760971699665])+(1.-msk_ref)*numpy.array([-3.1904098199744872, 0.81998853680163775])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(2.15276640076)+(1.-msk_arg0)*(-2.04284766814)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-2.5429314638433684, 2.0318827224945402, -2.3636856893688076, 3.4855417570765717,
0.44952339669472341], [2.5403509140391156, 2.3524971436536095, 3.9461465487262188, 2.6955339698780154,
-0.45702899742654868], [-1.0602022717036155, 0.74771157767510843, 1.6452939357358289, -3.0322095528230921,
1.6787335078454735], [-4.263078102519902, 3.2046384335109863, 4.0147512257312048, 3.3998288702285713,
-0.56118778404289138]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[4.6956978646047602, 0.12088367826685165, 4.5164520901301994, -1.3327753563151798,
1.7032430040666684], [-0.38758451327772381, -0.19973074289221771, -1.793380147964827, -0.5427675691166236,
2.6097953981879405], [3.2129686724650073, 1.4050548230862834, 0.50747246502556287, 5.1849759535844839,
0.47403289291591832], [6.4158445032812939, -1.0518720327495945, -1.861984824969813, -1.2470624694671795,
2.7139541848042832]])+(1.-msk_ref)*numpy.array([[0.50008379570506278, -4.0747303906328458, 0.32083802123050198,
-5.5283894252148773, -2.4923710648330291], [-4.5831985821774213, -4.3953448117919152, -5.9889942168645245,
-4.7383816380163211, -1.585818670711757], [-0.98264539643469018, -2.7905592458134141, -3.6881416038741346,
0.98936188468478647, -3.7215811759837791], [2.2202304343815964, -5.247486101649292, -6.0575988938695104,
-5.4426765383668769, -1.4816598840954143]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(3.30825297654)+(1.-msk_arg0)*(-3.92076322418)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[-0.52002332126128437, 4.3478222442071139], [3.3434922005534364, 2.8013302606159396]],
[[-2.3200079969586795, -3.0556917667690642], [-2.7103276420969582, 4.1511200748037105]], [[-0.92404095393396624,
2.6484690327098859], [-2.1529217611726503, 4.4602897709717144]], [[0.58271708006920253, 1.9322598870751975],
[-3.5184596230462182, -4.4222029485403436]], [[-4.3953168785776278, -4.450145776704125], [4.2137072146995536,
3.8966485797913304]], [[3.1838339108927798, -3.6438064267677328], [1.3789445362861974, -2.9975552731311272]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[3.8282762978010325, -1.0395692676673658], [-0.035239224013688286,
0.50692271592380855]], [[5.6282609734984277, 6.3639447433088119], [6.0185806186367063, -0.84286709826396233]],
[[4.2322939304737144, 0.65978394382986227], [5.4611747377123985, -1.1520367944319663]], [[2.7255358964705456,
1.3759930894645507], [6.8267125995859663, 7.7304559250800917]], [[7.7035698551173759, 7.7583987532438732],
[-0.90545423815980541, -0.58839560325158224]], [[0.12441906564696836, 6.952059403307481], [1.9293084402535507,
6.3058082496708749]]])+(1.-msk_ref)*numpy.array([[[-3.4007399029178136, -8.2685854683862114], [-7.2642554247325339,
-6.7220934847950371]], [[-1.6007552272204184, -0.86507145741003377], [-1.2104355820821397, -8.071883298982808]],
[[-2.9967222702451317, -6.5692322568889843], [-1.7678414630064476, -8.3810529951508119]], [[-4.5034803042483009,
-5.853023111254295], [-0.40230360113287977, 0.50143972436124562]], [[0.47455365439852981, 0.52938255252502708],
[-8.1344704388786511, -7.8174118039704279]], [[-7.1045971350718773, -0.27695679741136514], [-5.2997077604652958,
-0.92320795104797071]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(4.28115160685)+(1.-msk_arg0)*(-2.99624588284)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-3.7034845683259832, -3.2006988280486115, 4.6850337347787345, -1.5431340070704103],
[-3.9508001556883876, 4.7231128873762902, -3.0051096732527691, -0.071944916970104522], [3.2109725637398565,
4.0910170733379978, -3.7166755556626772, -4.402146700420734]], [[-1.5273991623031669, -1.4865381526416344,
3.902360473786171, -1.3538484671917517], [0.38707743115008331, 4.3855048056490773, 1.9022231675241139,
1.397387379628614], [1.0431068102446126, 3.0934379513218886, 2.0138255231319624, 4.2870052231295865]]],
[[[-4.2737086360299941, 4.2752748398653857, -3.7092106416006629, 1.417380944080846], [-2.4275128587779737,
-2.879911926405645, -4.23153844815229, -0.30555854124221682], [-2.6571106905165331, 2.6754859746804112,
-4.5544081791240201, -0.020082609244357563]], [[1.0570642052363857, -1.7647078574502792, 2.6330635742775668,
3.717540829723692], [4.9220552078075279, -3.9060168420798869, 1.4799017868437296, 2.7842835488914588],
[-2.0839669385912343, -4.8850626605172867, 1.7595980725429907, 3.0026383083452117]]], [[[-0.83195539201513036,
-1.2109400306251725, 2.0638657571201078, -0.86905066581365009], [-0.54092453152611775, 3.4954317917180884,
3.7826658876966359, -2.5779636206330894], [1.6720368874738147, 0.42564364358069096, -4.9027760864384096,
0.66861897918883617]], [[-4.1302737255553801, -3.2949127465748109, 1.5706320204575341, -2.2912291830881903],
[-2.19574275564025, 3.983182476523945, 2.032922034582441, -2.7459308093848711], [4.6025690264891459,
3.7012963844874829, 0.1748188819614116, 4.2002322255258893]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[7.984636175171131, 7.4818504348937598, -0.40388212793358669, 5.8242856139155581],
[8.2319517625335354, -0.44196128053114236, 7.286261280097917, 4.3530965238152524], [1.0701790431052913,
0.19013453350715004, 7.997827162507825, 8.6832983072658827]], [[5.8085507691483151, 5.7676897594867818,
0.37879113305897683, 5.6350000740368991], [3.8940741756950645, -0.10435319880392946, 2.3789284393210339,
2.8837642272165338], [3.2380447966005352, 1.1877136555232592, 2.2673260837131854, -0.0058536162844387007]]],
[[[8.5548602428751419, 0.005876766979762138, 7.9903622484458108, 2.8637706627643018], [6.7086644656231211,
7.1610635332507933, 8.5126900549974387, 4.5867101480873647], [6.938262297361681, 1.6056656321647367, 8.835559785969167,
4.3012342160895054]], [[3.2240874016087622, 6.0458594642954271, 1.6480880325675811, 0.5636107771214558],
[-0.64090360096238008, 8.1871684489250356, 2.8012498200014182, 1.496868057953689], [6.3651185454363821,
9.1662142673624345, 2.5215535343021571, 1.2785132984999361]]], [[[5.1131069988602782, 5.4920916374703204,
2.2172858497250401, 5.1502022726587979], [4.8220761383712656, 0.78571981512705946, 0.49848571914851192,
6.8591152274782372], [2.6091147193713331, 3.8555079632644569, 9.1839276932835574, 3.6125326276563117]],
[[8.4114253324005279, 7.5760643534199588, 2.7105195863876137, 6.5723807899333382], [6.4768943624853978,
0.29796913032120287, 2.2482295722627068, 7.0270824162300194], [-0.32141741964399806, 0.57985522235766496,
4.1063327248837362, 0.080919381319258576]]]])+(1.-msk_ref)*numpy.array([[[[0.70723868548106505, 0.20445294520369339,
-7.6812796176236526, -1.4531118757745078], [0.9545542728434695, -7.7193587702212083, 0.0088637904078510132,
-2.9243009658748136], [-6.2072184465847746, -7.0872629561829159, 0.72042967281775905, 1.4059008175758159]],
[[-1.4688467205417512, -1.5097077302032837, -6.8986063566310891, -1.6423974156531664], [-3.3833233139950014,
-7.3817506884939954, -4.898469050369032, -4.3936332624735321], [-4.0393526930895307, -6.0896838341668067,
-5.0100714059768805, -7.2832511059745046]]], [[[1.277462753185076, -7.2715207227103038, 0.71296475875574483,
-4.4136268269257641], [-0.56873302406694437, -0.11633395643927313, 1.2352925653073719, -2.6906873416027013],
[-0.33913519232838496, -5.6717318575253293, 1.558162296279102, -2.9761632736005605]], [[-4.0533100880813038,
-1.2315380253946389, -5.6293094571224849, -6.7137867125686101], [-7.918301090652446, 0.90977095923496876,
-4.4761476696886477, -5.7805294317363769], [-0.91227894425368383, 1.8888167776723686, -4.7558439553879088,
-5.9988841911901298]]], [[[-2.1642904908297877, -1.7853058522197456, -5.0601116399650259, -2.127195217031268],
[-2.4553213513188004, -6.4916776745630065, -6.778911770541554, -0.41828226221182874], [-4.6682827703187328,
-3.4218895264256091, 1.9065302035934915, -3.6648648620337543]], [[1.134027842710462, 0.29866686372989282,
-4.5668779033024522, -0.70501669975672776], [-0.8005031272046681, -6.9794283593688631, -5.0291679174273591,
-0.250315073460047], [-7.598814909334064, -6.697542267332401, -3.1710647648063297, -7.1964781083708074]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank1_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([0.57185536765716005,
-4.5016440600070959])+(1.-msk_arg0)*numpy.array([-0.4418100919929735, 1.7838290839713755])
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(4.01685432532)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-3.4449989576654145,
-8.5184983853296714])+(1.-msk_ref)*numpy.array([-4.4586644173155481, -2.2330252413511991])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank1_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([-4.1734209340603439,
4.5527582003296185])+(1.-msk_arg0)*numpy.array([-1.7000682822887789, 0.76683988376374757])
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-1.5016152385157842, 0.80809700227400683])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-2.6718056955445597,
3.7446611980556117])+(1.-msk_ref)*numpy.array([-0.19845304377299477, -0.041257118510259261])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank2_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[3.3500126396534871, 3.4943903203535527, -1.7005861531401179, 1.4952347206139418,
-4.5979578172283739], [-2.3055331093587372, -3.6474162865795225, -3.0632961186256935, 4.7258384683418715,
-0.58388337502415943], [4.7641302227265427, -0.11182220465882864, 2.8628458472454756, 1.6967713595739653,
2.8474759788446562], [2.5863322473986914, 1.6349340161801535, -2.9934700314340712, 3.4068691472223609,
-0.97913156666695667]])+(1.-msk_arg0)*numpy.array([[-0.34407378508566389, 2.6789454460601672, -3.3795587578901665,
-4.1659261688389009, 2.3147542825953309], [-2.0615148857755603, -2.1181768528675784, 4.7855957803525566,
2.4248630846228734, 4.4597452365342818], [4.5985091304874572, 2.9992334161018466, 0.73974708846994552,
-0.24440017509511858, -0.49166350583553875], [1.5878740787090537, 3.0210382196579779, 3.6343442933400869,
1.5494651243470852, -3.3635312675197349]])
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-3.53998589595)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[6.8899985356048354, 7.0343762163049011, 1.8393997428112305, 5.0352206165652902,
-1.0579719212770256], [1.2344527865926112, -0.10743039062817417, 0.47668977732565487, 8.2658243642932199,
2.9561025209271889], [8.3041161186778911, 3.4281636912925197, 6.402831743196824, 5.2367572555253137,
6.3874618747960046], [6.1263181433500398, 5.1749199121315019, 0.54651586451727718, 6.9468550431737093,
2.5608543292843917]])+(1.-msk_ref)*numpy.array([[3.1959121108656845, 6.2189313420115155, 0.16042713806118192,
-0.6259402728875525, 5.8547401785466793], [1.4784710101757881, 1.42180904308377, 8.325581676303905, 5.9648489805742217,
7.9997311324856302], [8.1384950264388056, 6.539219312053195, 4.2797329844212939, 3.2955857208562298,
3.0483223901158096], [5.1278599746604021, 6.5610241156093263, 7.1743301892914353, 5.0894510202984335,
0.17645462843161352]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank2_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[2.0867998826855514, 2.311725929216629, -3.4719596731221403, 1.7817832811577139,
1.5141982978301929], [3.1010865709749673, -2.1704923524391537, 3.7204405507466163, 4.629811066660821,
1.6635344950905893], [-2.574527711983543, -1.6203338172344193, 3.7119433126415871, -4.2495237660622687,
-2.1154248806831588], [0.14708606411584846, -4.3739162090051034, 0.28212084215683131, -3.2454357930486841,
4.0490170686662843]])+(1.-msk_arg0)*numpy.array
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import treecorr
import os
import coord
import fitsio
from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog, timer
from test_helper import is_ccw, is_ccw_3d
@timer
def test_log_binning():
import math
# Test some basic properties of the base class
def check_arrays(nnn):
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.ubin_size * nnn.nubins, nnn.max_u-nnn.min_u)
np.testing.assert_almost_equal(nnn.vbin_size * nnn.nvbins, nnn.max_v-nnn.min_v)
#print('logr = ',nnn.logr1d)
np.testing.assert_equal(nnn.logr1d.shape, (nnn.nbins,) )
np.testing.assert_almost_equal(nnn.logr1d[0], math.log(nnn.min_sep) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr1d[-1], math.log(nnn.max_sep) - 0.5*nnn.bin_size)
np.testing.assert_equal(nnn.logr.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.logr[:,0,0], nnn.logr1d)
np.testing.assert_almost_equal(nnn.logr[:,-1,-1], nnn.logr1d)
assert len(nnn.logr) == nnn.nbins
#print('u = ',nnn.u1d)
np.testing.assert_equal(nnn.u1d.shape, (nnn.nubins,) )
np.testing.assert_almost_equal(nnn.u1d[0], nnn.min_u + 0.5*nnn.ubin_size)
np.testing.assert_almost_equal(nnn.u1d[-1], nnn.max_u - 0.5*nnn.ubin_size)
np.testing.assert_equal(nnn.u.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.u[0,:,0], nnn.u1d)
np.testing.assert_almost_equal(nnn.u[-1,:,-1], nnn.u1d)
#print('v = ',nnn.v1d)
np.testing.assert_equal(nnn.v1d.shape, (2*nnn.nvbins,) )
np.testing.assert_almost_equal(nnn.v1d[0], -nnn.max_v + 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[-1], nnn.max_v - 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[nnn.nvbins], nnn.min_v + 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[nnn.nvbins-1], -nnn.min_v - 0.5*nnn.vbin_size)
np.testing.assert_equal(nnn.v.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.v[0,0,:], nnn.v1d)
np.testing.assert_almost_equal(nnn.v[-1,-1,:], nnn.v1d)
def check_defaultuv(nnn):
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == np.ceil(1./nnn.ubin_size)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == np.ceil(1./nnn.vbin_size)
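# Hedged note on the shapes verified by check_arrays above: logr, u and v are
# broadcast onto a common 3d grid of shape (nbins, nubins, 2*nvbins), with logr
# varying along axis 0, u along axis 1, and v along axis 2; v gets 2*nvbins
# bins because it runs over both signs, from -max_v up to max_v. A minimal
# sketch (hypothetical name nnn_shape, same treecorr API as used below):
nnn_shape = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20)
assert nnn_shape.v.shape == (nnn_shape.nbins, nnn_shape.nubins, 2*nnn_shape.nvbins)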
# Check the different ways to set up the binning:
# Omit bin_size
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, bin_type='LogRUV')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, n for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20,
min_u=0.2, max_u=0.9, nubins=12,
min_v=0., max_v=0.2, nvbins=2)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert nnn.min_u == 0.2
assert nnn.max_u == 0.9
assert nnn.nubins == 12
assert nnn.min_v == 0.
assert nnn.max_v == 0.2
assert nnn.nvbins == 2
check_arrays(nnn)
# Omit min_sep
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify max, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1,
max_u=0.9, nubins=3, ubin_size=0.05,
max_v=0.4, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert np.isclose(nnn.ubin_size, 0.05)
assert np.isclose(nnn.min_u, 0.75)
assert nnn.max_u == 0.9
assert nnn.nubins == 3
assert np.isclose(nnn.vbin_size, 0.05)
assert np.isclose(nnn.min_v, 0.2)
assert nnn.max_v == 0.4
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit max_sep
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.min_sep == 5.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1,
min_u=0.7, nubins=4, ubin_size=0.05,
min_v=0.2, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.bin_size == 0.1
assert nnn.nbins == 20
assert nnn.min_u == 0.7
assert np.isclose(nnn.ubin_size, 0.05)
assert nnn.nubins == 4
assert nnn.min_v == 0.2
assert nnn.max_v == 0.4
assert np.isclose(nnn.vbin_size, 0.05)
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit nbins
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=0.1, max_v=0.3, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.bin_size <= 0.1
assert nnn.min_u == 0.2
assert nnn.max_u == 0.9
assert nnn.nubins == 24
assert np.isclose(nnn.ubin_size, 0.7/24)
assert nnn.min_v == 0.1
assert nnn.max_v == 0.3
assert nnn.nvbins == 3
assert np.isclose(nnn.vbin_size, 0.2/3)
check_arrays(nnn)
# If only one of min/max u,v is set, respect that
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, ubin_size=0.03,
min_v=0.2, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_u == 0.2
assert nnn.max_u == 1.
assert nnn.nubins == 27
assert np.isclose(nnn.ubin_size, 0.8/27)
assert nnn.min_v == 0.2
assert nnn.max_v == 1.
assert nnn.nvbins == 12
assert np.isclose(nnn.vbin_size, 0.8/12)
check_arrays(nnn)
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
max_u=0.2, ubin_size=0.03,
max_v=0.2, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_u == 0.
assert nnn.max_u == 0.2
assert nnn.nubins == 7
assert np.isclose(nnn.ubin_size, 0.2/7)
assert nnn.min_v == 0.
assert nnn.max_v == 0.2
assert nnn.nvbins == 3
assert np.isclose(nnn.vbin_size, 0.2/3)
check_arrays(nnn)
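# A hedged sketch of the adjustment pattern seen in the two cases above
# (hypothetical name nnn_adj): nubins is rounded up to
# ceil((max_u - min_u)/ubin_size), and ubin_size is then shrunk so the bins
# exactly tile the range; v behaves the same way.
nnn_adj = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, ubin_size=0.03)
assert nnn_adj.nubins == int(np.ceil((nnn_adj.max_u - 0.2)/0.03))
assert np.isclose(nnn_adj.ubin_size, (nnn_adj.max_u - 0.2)/nnn_adj.nubins)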
# If only ubin_size/vbin_size are set for u,v, automatically figure out others.
# (And if necessary adjust the bin sizes down a bit so the bins tile the range.)
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
ubin_size=0.3, vbin_size=0.3)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == 4
assert np.isclose(nnn.ubin_size, 0.25)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == 4
assert np.isclose(nnn.vbin_size, 0.25)
check_arrays(nnn)
# If only nubins/nvbins are set for u,v, automatically figure out others.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
nubins=5, nvbins=5)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == 5
assert np.isclose(nnn.ubin_size,0.2)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == 5
assert np.isclose(nnn.vbin_size,0.2)
check_arrays(nnn)
# If both the bin counts and bin sizes are set for u,v, infer min/max automatically
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
ubin_size=0.1, nubins=5,
vbin_size=0.1, nvbins=5)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.ubin_size == 0.1
assert nnn.nubins == 5
assert nnn.max_u == 1.
assert np.isclose(nnn.min_u,0.5)
assert nnn.vbin_size == 0.1
assert nnn.nvbins == 5
assert nnn.min_v == 0.
assert np.isclose(nnn.max_v,0.5)
check_arrays(nnn)
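# A minimal sketch of the rule the constructive cases above exercise
# (hypothetical name nnn_demo): any two of min_sep, max_sep, bin_size, nbins
# determine the log-spaced r binning through
# bin_size * nbins = log(max_sep/min_sep), and u,v follow the same pattern.
nnn_demo = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20)
np.testing.assert_almost_equal(nnn_demo.bin_size * nnn_demo.nbins,
math.log(nnn_demo.max_sep/nnn_demo.min_sep))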
assert_raises(TypeError, treecorr.NNNCorrelation)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20)
assert_raises(TypeError, treecorr.NNNCorrelation, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, bin_size=0.1, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, nbins=20)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, bin_size=0.1)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Log')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Linear')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='TwoD')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Invalid')
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.3, max_u=0.9, ubin_size=0.1, nubins=6)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.9, max_u=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=-0.1, max_u=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.1, max_u=1.3)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.1, max_v=0.9, vbin_size=0.1, nvbins=9)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.9, max_v=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=-0.1, max_v=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.1, max_v=1.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
split_method='invalid')
# Check the use of sep_units
# radians
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='radians')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5.)
np.testing.assert_almost_equal(nnn._max_sep, 20.)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# arcsec
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcsec')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/3600)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/3600)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
# Note that logr is in the separation units, not radians.
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# arcmin
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcmin')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/60)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/60)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# degrees
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='degrees')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# hours
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='hours')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/12)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/12)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
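    # (Summary of the sep_units pattern above: min_sep/max_sep stay in the
    # user's units, while _min_sep/_max_sep hold the same values converted to
    # radians: arcsec -> pi/(180*3600), arcmin -> pi/(180*60),
    # degrees -> pi/180, hours -> pi/12.)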
# Check bin_slop
# Start with default behavior
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# Explicitly set bin_slop=1.0 does the same thing.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=1.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# Use a smaller bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.2,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.2
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.02)
np.testing.assert_almost_equal(nnn.bu, 0.006)
np.testing.assert_almost_equal(nnn.bv, 0.014)
# Use bin_slop == 0
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.0)
np.testing.assert_almost_equal(nnn.bu, 0.0)
np.testing.assert_almost_equal(nnn.bv, 0.0)
# Bigger bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=2.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 2.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.2)
np.testing.assert_almost_equal(nnn.bu, 0.06)
np.testing.assert_almost_equal(nnn.bv, 0.14)
# With bin_size > 0.1, explicit bin_slop=1.0 is accepted.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4, bin_slop=1.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.4
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.4)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# But implicit bin_slop is reduced so that b = 0.1
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.4
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
np.testing.assert_almost_equal(nnn.bin_slop, 0.25)
# Separately for each of the three parameters
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.05,
min_u=0., max_u=0.9, ubin_size=0.3,
min_v=0., max_v=0.17, vbin_size=0.17)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.05
assert np.isclose(nnn.ubin_size, 0.3)
assert np.isclose(nnn.vbin_size, 0.17)
np.testing.assert_almost_equal(nnn.b, 0.05)
np.testing.assert_almost_equal(nnn.bu, 0.1)
np.testing.assert_almost_equal(nnn.bv, 0.1)
np.testing.assert_almost_equal(nnn.bin_slop, 1.0) # The stored bin_slop is just for lnr
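
# A minimal sketch (not TreeCorr code) of the explicit bin_slop -> b relation
# exercised above: each of b, bu, bv is just bin_slop times the corresponding
# bin size.  (The implicit-default case, where b is capped at 0.1 and bin_slop
# is reduced to match, is checked separately above and not covered here.)
def _expected_b(bin_size, ubin_size, vbin_size, bin_slop):
    return (bin_slop * bin_size, bin_slop * ubin_size, bin_slop * vbin_size)

assert np.allclose(_expected_b(0.1, 0.03, 0.07, 0.2), (0.02, 0.006, 0.014))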
@timer
def test_direct_count_auto():
# If the catalogs are small enough, we can do a direct count of the number of triangles
    # to see if it comes out right. This should exactly match the treecorr code if bin_slop=0.
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
dij = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)
dik = np.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2)
djk = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x[i],y[i],x[j],y[j],x[k],y[k])
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x[j],y[j],x[i],y[i],x[k],y[k])
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x[j],y[j],x[k],y[k],x[i],y[i])
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x[i],y[i],x[k],y[k],x[j],y[j])
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x[k],y[k],x[i],y[i],x[j],y[j])
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x[k],y[k],x[j],y[j],x[i],y[i])
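                # (The parametrization used below: with sides sorted so that
                # d1 >= d2 >= d3, r is the middle side d2, u = d3/d2 lies in
                # [0,1], and v = (d1-d2)/d3 lies in [0,1]; the sign of v later
                # encodes the triangle orientation, ccw positive.)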
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
nz = np.where((ddd.ntri > 0) | (true_ntri > 0))
print('non-zero at:')
print(nz)
print('d1 = ',ddd.meand1[nz])
print('d2 = ',ddd.meand2[nz])
print('d3 = ',ddd.meand3[nz])
print('rnom = ',ddd.rnom[nz])
print('u = ',ddd.u[nz])
print('v = ',ddd.v[nz])
print('ddd.ntri = ',ddd.ntri[nz])
print('true_ntri = ',true_ntri[nz])
print('diff = ',ddd.ntri[nz] - true_ntri[nz])
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Check that running via the corr3 script works correctly.
file_name = os.path.join('data','nnn_direct_data.dat')
with open(file_name, 'w') as fid:
for i in range(ngal):
fid.write(('%.20f %.20f\n')%(x[i],y[i]))
L = 10*s
nrand = ngal
rx = (rng.random_sample(nrand)-0.5) * L
ry = (rng.random_sample(nrand)-0.5) * L
rcat = treecorr.Catalog(x=rx, y=ry)
rand_file_name = os.path.join('data','nnn_direct_rand.dat')
with open(rand_file_name, 'w') as fid:
for i in range(nrand):
fid.write(('%.20f %.20f\n')%(rx[i],ry[i]))
rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0, rng=rng)
rrr.process(rcat)
zeta, varzeta = ddd.calculateZeta(rrr)
# Semi-gratuitous check of BinnedCorr3.rng access.
assert rrr.rng is rng
assert ddd.rng is not rng
# First do this via the corr3 function.
config = treecorr.config.read_config('configs/nnn_direct.yaml')
logger = treecorr.config.setup_logger(0)
treecorr.corr3(config, logger)
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True,
skip_header=1)
print('corr3_output = ',corr3_output)
print('corr3_output.dtype = ',corr3_output.dtype)
print('rnom = ',ddd.rnom.flatten())
print(' ',corr3_output['r_nom'])
np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3)
print('unom = ',ddd.u.flatten())
print(' ',corr3_output['u_nom'])
np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3)
print('vnom = ',ddd.v.flatten())
print(' ',corr3_output['v_nom'])
np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3)
print('DDD = ',ddd.ntri.flatten())
print(' ',corr3_output['DDD'])
np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3)
print('RRR = ',rrr.ntri.flatten())
print(' ',corr3_output['RRR'])
np.testing.assert_allclose(corr3_output['RRR'], rrr.ntri.flatten(), rtol=1.e-3)
print('zeta = ',zeta.flatten())
print('from corr3 output = ',corr3_output['zeta'])
print('diff = ',corr3_output['zeta']-zeta.flatten())
diff_index = np.where(np.abs(corr3_output['zeta']-zeta.flatten()) > 1.e-5)[0]
print('different at ',diff_index)
print('zeta[diffs] = ',zeta.flatten()[diff_index])
print('corr3.zeta[diffs] = ',corr3_output['zeta'][diff_index])
print('diff[diffs] = ',zeta.flatten()[diff_index] - corr3_output['zeta'][diff_index])
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten(), rtol=1.e-3)
# Now calling out to the external corr3 executable.
# This is the only time we test the corr3 executable. All other tests use corr3 function.
import subprocess
corr3_exe = get_script_name('corr3')
p = subprocess.Popen( [corr3_exe,"configs/nnn_direct.yaml","verbose=0"] )
p.communicate()
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True,
skip_header=1)
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
# Also check compensated
drr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0)
rdd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0)
drr.process(cat, rcat)
rdd.process(rcat, cat)
zeta, varzeta = ddd.calculateZeta(rrr,drr,rdd)
config['nnn_statistic'] = 'compensated'
treecorr.corr3(config, logger)
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True, skip_header=1)
np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3)
print('rrr.tot = ',rrr.tot)
print('ddd.tot = ',ddd.tot)
print('drr.tot = ',drr.tot)
print('rdd.tot = ',rdd.tot)
rrrf = ddd.tot / rrr.tot
drrf = ddd.tot / drr.tot
rddf = ddd.tot / rdd.tot
np.testing.assert_allclose(corr3_output['RRR'], rrr.ntri.flatten() * rrrf, rtol=1.e-3)
np.testing.assert_allclose(corr3_output['DRR'], drr.ntri.flatten() * drrf, rtol=1.e-3)
np.testing.assert_allclose(corr3_output['RDD'], rdd.ntri.flatten() * rddf, rtol=1.e-3)
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten(), rtol=1.e-3)
    # Repeat with bin_slop = 0, since the code flow is different from brute=True
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And compare to the cross correlation
# Here, we get 6x as much, since each triangle is discovered 6 times.
ddd.clear()
ddd.process(cat,cat,cat, num_threads=2)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, 6*true_ntri)
# With the real CrossCorrelation class, each of the 6 correlations should end up being
# the same thing (without the extra factor of 6).
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
dddc.process(cat,cat,cat, num_threads=2)
# All 6 correlations are equal.
for d in [dddc.n1n2n3, dddc.n1n3n2, dddc.n2n1n3, dddc.n2n3n1, dddc.n3n1n2, dddc.n3n2n1]:
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(d.ntri, true_ntri)
# Or with 2 argument version, finds each triangle 3 times.
ddd.process(cat,cat, num_threads=2)
np.testing.assert_array_equal(ddd.ntri, 3*true_ntri)
# Again, NNNCrossCorrelation gets it right in each permutation.
dddc.process(cat,cat, num_threads=2)
for d in [dddc.n1n2n3, dddc.n1n3n2, dddc.n2n1n3, dddc.n2n3n1, dddc.n3n1n2, dddc.n3n2n1]:
np.testing.assert_array_equal(d.ntri, true_ntri)
# Invalid to omit file_name
config['verbose'] = 0
del config['file_name']
with assert_raises(TypeError):
treecorr.corr3(config)
config['file_name'] = 'data/nnn_direct_data.dat'
# OK to not have rand_file_name
# Also, check the automatic setting of output_dots=True when verbose=2.
# It's not too annoying if we also set max_top = 0.
del config['rand_file_name']
config['verbose'] = 2
config['max_top'] = 0
treecorr.corr3(config)
data = np.genfromtxt(config['nnn_file_name'], names=True, skip_header=1)
np.testing.assert_array_equal(data['ntri'], true_ntri.flatten())
assert 'zeta' not in data.dtype.names
# Check a few basic operations with a NNNCorrelation object.
do_pickle(ddd)
ddd2 = ddd.copy()
ddd2 += ddd
np.testing.assert_allclose(ddd2.ntri, 2*ddd.ntri)
np.testing.assert_allclose(ddd2.weight, 2*ddd.weight)
np.testing.assert_allclose(ddd2.meand1, 2*ddd.meand1)
np.testing.assert_allclose(ddd2.meand2, 2*ddd.meand2)
np.testing.assert_allclose(ddd2.meand3, 2*ddd.meand3)
np.testing.assert_allclose(ddd2.meanlogd1, 2*ddd.meanlogd1)
np.testing.assert_allclose(ddd2.meanlogd2, 2*ddd.meanlogd2)
np.testing.assert_allclose(ddd2.meanlogd3, 2*ddd.meanlogd3)
np.testing.assert_allclose(ddd2.meanu, 2*ddd.meanu)
np.testing.assert_allclose(ddd2.meanv, 2*ddd.meanv)
ddd2.clear()
ddd2 += ddd
np.testing.assert_allclose(ddd2.ntri, ddd.ntri)
np.testing.assert_allclose(ddd2.weight, ddd.weight)
np.testing.assert_allclose(ddd2.meand1, ddd.meand1)
np.testing.assert_allclose(ddd2.meand2, ddd.meand2)
np.testing.assert_allclose(ddd2.meand3, ddd.meand3)
np.testing.assert_allclose(ddd2.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd2.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd2.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd2.meanu, ddd.meanu)
np.testing.assert_allclose(ddd2.meanv, ddd.meanv)
ascii_name = 'output/nnn_ascii.txt'
ddd.write(ascii_name, precision=16)
ddd3 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
ddd3.read(ascii_name)
np.testing.assert_allclose(ddd3.ntri, ddd.ntri)
np.testing.assert_allclose(ddd3.weight, ddd.weight)
np.testing.assert_allclose(ddd3.meand1, ddd.meand1)
np.testing.assert_allclose(ddd3.meand2, ddd.meand2)
np.testing.assert_allclose(ddd3.meand3, ddd.meand3)
np.testing.assert_allclose(ddd3.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd3.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd3.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd3.meanu, ddd.meanu)
np.testing.assert_allclose(ddd3.meanv, ddd.meanv)
with assert_raises(TypeError):
ddd2 += config
ddd4 = treecorr.NNNCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd4
ddd5 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd5
ddd6 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd6
ddd7 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u-0.1, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd7
ddd8 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u+0.1, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd8
ddd9 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins*2,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd9
ddd10 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v-0.1, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd10
ddd11 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v+0.1, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd11
ddd12 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins*2)
with assert_raises(ValueError):
ddd2 += ddd12
# Check that adding results with different coords or metric emits a warning.
cat2 = treecorr.Catalog(x=x, y=y, z=x)
with CaptureLog() as cl:
ddd13 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
logger=cl.logger)
ddd13.process_auto(cat2)
ddd13 += ddd2
print(cl.output)
assert "Detected a change in catalog coordinate systems" in cl.output
with CaptureLog() as cl:
ddd14 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
logger=cl.logger)
ddd14.process_auto(cat2, metric='Arc')
ddd14 += ddd2
assert "Detected a change in metric" in cl.output
fits_name = 'output/nnn_fits.fits'
ddd.write(fits_name)
ddd15 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
ddd15.read(fits_name)
np.testing.assert_allclose(ddd15.ntri, ddd.ntri)
np.testing.assert_allclose(ddd15.weight, ddd.weight)
| np.testing.assert_allclose(ddd15.meand1, ddd.meand1) | numpy.testing.assert_allclose |
import datetime as dt
import pytest
from distutils.version import LooseVersion
import numpy as np
try:
import pandas as pd
from pandas._testing import (
makeCustomDataframe, makeMixedDataFrame, makeTimeDataFrame
)
except ImportError:
pytestmark = pytest.mark.skip('pandas not available')
from bokeh.models.widgets.tables import (
    NumberFormatter, IntEditor, NumberEditor, StringFormatter, StringEditor,
    SelectEditor, DateFormatter, DataCube, CellEditor,
    SumAggregator, AvgAggregator, MinAggregator
)
from panel.depends import bind
from panel.widgets import Button, DataFrame, Tabulator, TextInput
pd_old = pytest.mark.skipif(LooseVersion(pd.__version__) < '1.3',
reason="Requires latest pandas")
def test_dataframe_widget(dataframe, document, comm):
table = DataFrame(dataframe)
model = table.get_root(document, comm)
index_col, int_col, float_col, str_col = model.columns
assert index_col.title == 'index'
assert isinstance(index_col.formatter, NumberFormatter)
assert isinstance(index_col.editor, CellEditor)
assert int_col.title == 'int'
assert isinstance(int_col.formatter, NumberFormatter)
assert isinstance(int_col.editor, IntEditor)
assert float_col.title == 'float'
assert isinstance(float_col.formatter, NumberFormatter)
assert isinstance(float_col.editor, NumberEditor)
    assert str_col.title == 'str'
    assert isinstance(str_col.formatter, StringFormatter)
    assert isinstance(str_col.editor, StringEditor)
def test_dataframe_widget_no_show_index(dataframe, document, comm):
table = DataFrame(dataframe, show_index=False)
model = table.get_root(document, comm)
assert len(model.columns) == 3
int_col, float_col, str_col = model.columns
assert int_col.title == 'int'
assert float_col.title == 'float'
assert str_col.title == 'str'
table.show_index = True
assert len(model.columns) == 4
index_col, int_col, float_col, str_col = model.columns
assert index_col.title == 'index'
assert int_col.title == 'int'
assert float_col.title == 'float'
assert str_col.title == 'str'
def test_dataframe_widget_datetimes(document, comm):
table = DataFrame(makeTimeDataFrame())
model = table.get_root(document, comm)
dt_col, _, _, _, _ = model.columns
assert dt_col.title == 'index'
assert isinstance(dt_col.formatter, DateFormatter)
assert isinstance(dt_col.editor, CellEditor)
def test_dataframe_editors(dataframe, document, comm):
editor = SelectEditor(options=['A', 'B', 'C'])
table = DataFrame(dataframe, editors={'str': editor})
model = table.get_root(document, comm)
model_editor = model.columns[-1].editor
    assert model_editor is not editor
assert isinstance(model_editor, SelectEditor)
assert model_editor.options == ['A', 'B', 'C']
def test_dataframe_formatter(dataframe, document, comm):
formatter = NumberFormatter(format='0.0000')
table = DataFrame(dataframe, formatters={'float': formatter})
model = table.get_root(document, comm)
model_formatter = model.columns[2].formatter
assert model_formatter is not formatter
assert isinstance(model_formatter, NumberFormatter)
assert model_formatter.format == formatter.format
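
# (Inferred from the two tests above: Panel attaches an equivalent copy of a
# user-supplied editor/formatter to the Bokeh model rather than the original
# object, so the `is not` checks pass while type and parameters match.)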
def test_dataframe_triggers(dataframe):
events = []
def increment(event, events=events):
events.append(event)
table = DataFrame(dataframe)
table.param.watch(increment, 'value')
table._process_events({'data': {'str': ['C', 'B', 'A']}})
assert len(events) == 1
def test_dataframe_does_not_trigger(dataframe):
events = []
def increment(event, events=events):
events.append(event)
table = DataFrame(dataframe)
table.param.watch(increment, 'value')
table._process_events({'data': {'str': ['A', 'B', 'C']}})
assert len(events) == 0
def test_dataframe_selected_dataframe(dataframe):
table = DataFrame(dataframe, selection=[0, 2])
pd.testing.assert_frame_equal(table.selected_dataframe, dataframe.iloc[[0, 2]])
def test_dataframe_process_selection_event(dataframe):
table = DataFrame(dataframe, selection=[0, 2])
table._process_events({'indices': [0, 2]})
pd.testing.assert_frame_equal(table.selected_dataframe, dataframe.iloc[[0, 2]])
def test_dataframe_process_data_event(dataframe):
df = dataframe.copy()
table = DataFrame(dataframe, selection=[0, 2])
table._process_events({'data': {'int': [5, 7, 9]}})
df['int'] = [5, 7, 9]
pd.testing.assert_frame_equal(table.value, df)
table._process_events({'data': {'int': {1: 3, 2: 4, 0: 1}}})
df['int'] = [1, 3, 4]
pd.testing.assert_frame_equal(table.value, df)
def test_dataframe_duplicate_column_name(document, comm):
df = pd.DataFrame([[1, 1], [2, 2]], columns=['col', 'col'])
with pytest.raises(ValueError):
table = DataFrame(df)
df = pd.DataFrame([[1, 1], [2, 2]], columns=['a', 'b'])
table = DataFrame(df)
with pytest.raises(ValueError):
table.value = table.value.rename(columns={'a': 'b'})
df = pd.DataFrame([[1, 1], [2, 2]], columns=['a', 'b'])
table = DataFrame(df)
table.get_root(document, comm)
with pytest.raises(ValueError):
table.value = table.value.rename(columns={'a': 'b'})
def test_hierarchical_index(document, comm):
df = pd.DataFrame([
('Germany', 2020, 9, 2.4, 'A'),
('Germany', 2021, 3, 7.3, 'C'),
('Germany', 2022, 6, 3.1, 'B'),
('UK', 2020, 5, 8.0, 'A'),
('UK', 2021, 1, 3.9, 'B'),
('UK', 2022, 9, 2.2, 'A')
], columns=['Country', 'Year', 'Int', 'Float', 'Str']).set_index(['Country', 'Year'])
table = DataFrame(value=df, hierarchical=True,
aggregators={'Year': {'Int': 'sum', 'Float': 'mean'}})
model = table.get_root(document, comm)
assert isinstance(model, DataCube)
assert len(model.grouping) == 1
grouping = model.grouping[0]
assert len(grouping.aggregators) == 2
agg1, agg2 = grouping.aggregators
assert agg1.field_ == 'Int'
assert isinstance(agg1, SumAggregator)
assert agg2.field_ == 'Float'
assert isinstance(agg2, AvgAggregator)
table.aggregators = {'Year': 'min'}
agg1, agg2 = grouping.aggregators
print(grouping)
assert agg1.field_ == 'Int'
assert isinstance(agg1, MinAggregator)
assert agg2.field_ == 'Float'
assert isinstance(agg2, MinAggregator)
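
# (Assumed from the test above: `aggregators` maps an index level either to a
# single aggregation name applied to every remaining column, or to a
# {column: aggregation} dict; the names seen here are 'sum', 'mean' and 'min'.)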
def test_none_table(document, comm):
table = DataFrame(value=None)
assert table.indexes == []
model = table.get_root(document, comm)
assert model.source.data == {}
def test_tabulator_selected_dataframe():
df = makeMixedDataFrame()
table = Tabulator(df, selection=[0, 2])
pd.testing.assert_frame_equal(table.selected_dataframe, df.iloc[[0, 2]])
def test_tabulator_selected_and_filtered_dataframe(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
pd.testing.assert_frame_equal(table.selected_dataframe, df)
table.add_filter('foo3', 'C')
pd.testing.assert_frame_equal(table.selected_dataframe, df[df["C"] == "foo3"])
def test_tabulator_config_defaults(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
model = table.get_root(document, comm)
assert model.configuration['columns'] == [
{'field': 'index'},
{'field': 'A'},
{'field': 'B'},
{'field': 'C'},
{'field': 'D'}
]
assert model.configuration['selectable'] == True
def test_tabulator_config_formatter_string(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, formatters={'B': 'tickCross'})
model = table.get_root(document, comm)
assert model.configuration['columns'][2] == {'field': 'B', 'formatter': 'tickCross'}
def test_tabulator_config_formatter_dict(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, formatters={'B': {'type': 'tickCross', 'tristate': True}})
model = table.get_root(document, comm)
assert model.configuration['columns'][2] == {'field': 'B', 'formatter': 'tickCross', 'formatterParams': {'tristate': True}}
def test_tabulator_config_editor_string(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, editors={'B': 'select'})
model = table.get_root(document, comm)
assert model.configuration['columns'][2] == {'field': 'B', 'editor': 'select'}
def test_tabulator_config_editor_dict(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, editors={'B': {'type': 'select', 'values': True}})
model = table.get_root(document, comm)
assert model.configuration['columns'][2] == {'field': 'B', 'editor': 'select', 'editorParams': {'values': True}}
def test_tabulator_groups(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, groups={'Number': ['A', 'B'], 'Other': ['C', 'D']})
model = table.get_root(document, comm)
assert model.configuration['columns'] == [
{'field': 'index'},
{'title': 'Number',
'columns': [
{'field': 'A'},
{'field': 'B'}
]},
{'title': 'Other',
'columns': [
{'field': 'C'},
{'field': 'D'}
]}
]
def test_tabulator_frozen_cols(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, frozen_columns=['index'])
model = table.get_root(document, comm)
assert model.configuration['columns'] == [
{'field': 'index', 'frozen': True},
{'field': 'A'},
{'field': 'B'},
{'field': 'C'},
{'field': 'D'}
]
def test_tabulator_frozen_rows(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, frozen_rows=[0, -1])
model = table.get_root(document, comm)
assert model.frozen_rows == [0, 4]
table.frozen_rows = [1, -2]
assert model.frozen_rows == [1, 3]
def test_tabulator_selectable_rows(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, selectable_rows=lambda df: list(df[df.A>2].index.values))
model = table.get_root(document, comm)
assert model.selectable_rows == [3, 4]
def test_tabulator_pagination(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, pagination='remote', page_size=2)
model = table.get_root(document, comm)
assert model.max_page == 3
assert model.page_size == 2
assert model.page == 1
expected = {
'index': np.array([0, 1]),
'A': np.array([0, 1]),
'B': np.array([0, 1]),
'C': np.array(['foo1', 'foo2']),
'D': np.array(['2009-01-01T00:00:00.000000000',
'2009-01-02T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
table.page = 2
expected = {
'index': np.array([2, 3]),
'A': np.array([2, 3]),
'B': np.array([0., 1.]),
'C': np.array(['foo3', 'foo4']),
'D': np.array(['2009-01-05T00:00:00.000000000',
'2009-01-06T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
table.page_size = 3
table.page = 1
assert model.max_page == 2
expected = {
'index': np.array([0, 1, 2]),
'A': np.array([0, 1, 2]),
'B': np.array([0, 1, 0]),
'C': np.array(['foo1', 'foo2', 'foo3']),
'D': np.array(['2009-01-01T00:00:00.000000000',
'2009-01-02T00:00:00.000000000',
'2009-01-05T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
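
# A minimal sketch (an assumption about the paging arithmetic, not Panel API)
# of what the max_page assertions above rely on: with remote pagination the
# model holds only the current page, and max_page is the ceiling of the row
# count over the page size.
def _expected_max_page(nrows, page_size):
    return -(-nrows // page_size)  # ceiling division

assert _expected_max_page(5, 2) == 3  # 5 rows, page_size=2 -> max_page 3
assert _expected_max_page(5, 3) == 2  # 5 rows, page_size=3 -> max_page 2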
def test_tabulator_pagination_selection(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, pagination='remote', page_size=2)
model = table.get_root(document, comm)
table.selection = [2, 3]
assert model.source.selected.indices == []
table.page = 2
assert model.source.selected.indices == [0, 1]
def test_tabulator_pagination_selectable_rows(document, comm):
df = makeMixedDataFrame()
table = Tabulator(
df, pagination='remote', page_size=3,
selectable_rows=lambda df: list(df.index.values[::2])
)
model = table.get_root(document, comm)
print(table._processed)
assert model.selectable_rows == [0, 2]
table.page = 2
assert model.selectable_rows == [3]
@pd_old
def test_tabulator_styling(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
def high_red(value):
return 'color: red' if value > 2 else 'color: black'
table.style.applymap(high_red, subset=['A'])
model = table.get_root(document, comm)
assert model.styles == {
0: {1: [('color', 'black')]},
1: {1: [('color', 'black')]},
2: {1: [('color', 'black')]},
3: {1: [('color', 'red')]},
4: {1: [('color', 'red')]}
}
def test_tabulator_empty_table(document, comm):
value_df = makeMixedDataFrame()
empty_df = pd.DataFrame([], columns=value_df.columns)
table = Tabulator(empty_df)
table.get_root(document, comm)
assert table.value.shape == empty_df.shape
table.stream(value_df, follow=True)
assert table.value.shape == value_df.shape
def test_tabulator_stream_series(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
model = table.get_root(document, comm)
stream_value = pd.Series({'A': 5, 'B': 1, 'C': 'foo6', 'D': dt.datetime(2009, 1, 8)})
table.stream(stream_value)
assert len(table.value) == 6
expected = {
'index': np.array([0, 1, 2, 3, 4, 5]),
'A': np.array([0, 1, 2, 3, 4, 5]),
'B': np.array([0, 1, 0, 1, 0, 1]),
'C': np.array(['foo1', 'foo2', 'foo3', 'foo4', 'foo5', 'foo6']),
'D': np.array(['2009-01-01T00:00:00.000000000',
'2009-01-02T00:00:00.000000000',
'2009-01-05T00:00:00.000000000',
'2009-01-06T00:00:00.000000000',
'2009-01-07T00:00:00.000000000',
'2009-01-08T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
def test_tabulator_stream_series_rollover(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
model = table.get_root(document, comm)
stream_value = pd.Series({'A': 5, 'B': 1, 'C': 'foo6', 'D': dt.datetime(2009, 1, 8)})
table.stream(stream_value, rollover=5)
assert len(table.value) == 5
expected = {
'index': np.array([1, 2, 3, 4, 5]),
'A': np.array([1, 2, 3, 4, 5]),
'B': np.array([1, 0, 1, 0, 1]),
'C': np.array(['foo2', 'foo3', 'foo4', 'foo5', 'foo6']),
'D': np.array(['2009-01-02T00:00:00.000000000',
'2009-01-05T00:00:00.000000000',
'2009-01-06T00:00:00.000000000',
'2009-01-07T00:00:00.000000000',
'2009-01-08T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
def test_tabulator_stream_df_rollover(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
model = table.get_root(document, comm)
stream_value = pd.Series({'A': 5, 'B': 1, 'C': 'foo6', 'D': dt.datetime(2009, 1, 8)}).to_frame().T
table.stream(stream_value, rollover=5)
assert len(table.value) == 5
expected = {
'index': np.array([1, 2, 3, 4, 5]),
'A': np.array([1, 2, 3, 4, 5]),
'B': np.array([1, 0, 1, 0, 1]),
'C': np.array(['foo2', 'foo3', 'foo4', 'foo5', 'foo6']),
'D': np.array(['2009-01-02T00:00:00.000000000',
'2009-01-05T00:00:00.000000000',
'2009-01-06T00:00:00.000000000',
'2009-01-07T00:00:00.000000000',
'2009-01-08T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
def test_tabulator_stream_dict_rollover(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
model = table.get_root(document, comm)
stream_value = {'A': [5], 'B': [1], 'C': ['foo6'], 'D': [dt.datetime(2009, 1, 8)]}
table.stream(stream_value, rollover=5)
assert len(table.value) == 5
expected = {
'index': np.array([1, 2, 3, 4, 5]),
'A': np.array([1, 2, 3, 4, 5]),
'B': np.array([1, 0, 1, 0, 1]),
'C': np.array(['foo2', 'foo3', 'foo4', 'foo5', 'foo6']),
'D': np.array(['2009-01-02T00:00:00.000000000',
'2009-01-05T00:00:00.000000000',
'2009-01-06T00:00:00.000000000',
'2009-01-07T00:00:00.000000000',
'2009-01-08T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
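
# (Rollover semantics as exercised by the three tests above, stated as an
# assumption: after a stream with rollover=n, only the last n rows are kept,
# so streaming one row onto a five-row frame with rollover=5 drops row 0.)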
def test_tabulator_patch_scalars(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
model = table.get_root(document, comm)
table.patch({'A': [(0, 2), (4, 1)], 'C': [(0, 'foo0')]})
expected = {
'index': np.array([0, 1, 2, 3, 4]),
'A': np.array([2, 1, 2, 3, 1]),
'B': np.array([0, 1, 0, 1, 0]),
'C': np.array(['foo0', 'foo2', 'foo3', 'foo4', 'foo5']),
'D': np.array(['2009-01-01T00:00:00.000000000',
'2009-01-02T00:00:00.000000000',
'2009-01-05T00:00:00.000000000',
'2009-01-06T00:00:00.000000000',
'2009-01-07T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
def test_tabulator_patch_ranges(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
model = table.get_root(document, comm)
table.patch({
'A': [(slice(0, 5), [5, 4, 3, 2, 1])],
'C': [(slice(0, 3), ['foo3', 'foo2', 'foo1'])]
})
expected = {
'index': np.array([0, 1, 2, 3, 4]),
'A': np.array([5, 4, 3, 2, 1]),
'B': np.array([0, 1, 0, 1, 0]),
'C': np.array(['foo3', 'foo2', 'foo1', 'foo4', 'foo5']),
'D': np.array(['2009-01-01T00:00:00.000000000',
'2009-01-02T00:00:00.000000000',
'2009-01-05T00:00:00.000000000',
'2009-01-06T00:00:00.000000000',
'2009-01-07T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
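
# Patch spec, as pinned down by the two tests above (stated as an assumption,
# not Panel documentation): a mapping from column name to a list of
# (index-or-slice, values) pairs, e.g.
#   table.patch({'A': [(0, 2)]})                 # set A[0] = 2
#   table.patch({'A': [(slice(0, 2), [5, 4])]})  # set A[0:2] = [5, 4]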
def test_tabulator_stream_series_paginated_not_follow(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, pagination='remote', page_size=2)
model = table.get_root(document, comm)
stream_value = pd.Series({'A': 5, 'B': 1, 'C': 'foo6', 'D': dt.datetime(2009, 1, 8)})
table.stream(stream_value, follow=False)
assert table.page == 1
assert len(table.value) == 6
expected = {
'index': np.array([0, 1]),
'A': np.array([0, 1]),
'B': np.array([0, 1]),
'C': np.array(['foo1', 'foo2']),
'D': np.array(['2009-01-01T00:00:00.000000000',
'2009-01-02T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
def test_tabulator_stream_series_paginated_follow(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, pagination='remote', page_size=2)
model = table.get_root(document, comm)
stream_value = pd.Series({'A': 5, 'B': 1, 'C': 'foo6', 'D': dt.datetime(2009, 1, 8)})
table.stream(stream_value, follow=True)
assert table.page == 3
assert len(table.value) == 6
expected = {
'index': np.array([4, 5]),
'A': np.array([4, 5]),
'B': np.array([0, 1]),
'C': np.array(['foo5', 'foo6']),
'D': np.array(['2009-01-07T00:00:00.000000000',
'2009-01-08T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
def test_tabulator_paginated_sorted_selection(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df, pagination='remote', page_size=2)
    table.sorters = [{'field': 'A', 'dir': 'desc'}]
model = table.get_root(document, comm)
table.selection = [3]
assert model.source.selected.indices == [1]
table.selection = [0, 1]
assert model.source.selected.indices == []
table.selection = [3, 4]
assert model.source.selected.indices == [1, 0]
table.selection = []
assert model.source.selected.indices == []
table._process_events({'indices': [0, 1]})
assert table.selection == [4, 3]
table._process_events({'indices': [1]})
assert table.selection == [3]
table.sorters = [{'field': 'A', 'dir': 'asc'}]
table._process_events({'indices': [1]})
assert table.selection == [1]
def test_tabulator_stream_dataframe(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
model = table.get_root(document, comm)
stream_value = pd.DataFrame({
'A': [5, 6],
'B': [1, 0],
'C': ['foo6', 'foo7'],
'D': [dt.datetime(2009, 1, 8), dt.datetime(2009, 1, 9)]
})
table.stream(stream_value)
assert len(table.value) == 7
expected = {
'index': np.array([0, 1, 2, 3, 4, 5, 6]),
'A': np.array([0, 1, 2, 3, 4, 5, 6]),
'B': np.array([0, 1, 0, 1, 0, 1, 0]),
'C': np.array(['foo1', 'foo2', 'foo3', 'foo4', 'foo5', 'foo6', 'foo7']),
'D': np.array(['2009-01-01T00:00:00.000000000',
'2009-01-02T00:00:00.000000000',
'2009-01-05T00:00:00.000000000',
'2009-01-06T00:00:00.000000000',
'2009-01-07T00:00:00.000000000',
'2009-01-08T00:00:00.000000000',
'2009-01-09T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
def test_tabulator_constant_scalar_filter(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
model = table.get_root(document, comm)
table.add_filter('foo3', 'C')
expected = {
'index': np.array([2]),
'A': np.array([2]),
'B': np.array([0]),
'C': np.array(['foo3']),
'D': np.array(['2009-01-05T00:00:00.000000000'],
dtype='datetime64[ns]')
}
for col, values in model.source.data.items():
np.testing.assert_array_equal(values, expected[col])
def test_tabulator_constant_list_filter(document, comm):
df = makeMixedDataFrame()
table = Tabulator(df)
model = table.get_root(document, comm)
table.add_filter(['foo3', 'foo5'], 'C')
expected = {
'index': np.array([2, 4]),
'A': np.array([2, 4]),
'B': np.array([0, 0]),
'C': | np.array(['foo3', 'foo5']) | numpy.array |
# This first demo shows how a robot swarm can autonomously choose a loop shape and form the
# shape in a distributed manner, without central control. Two consensus processes, decision
# making and role assignment, are performed consecutively with a fixed but arbitrary network.
# input arguments:
# '-n': number of robots
# '--manual': manual mode, press ENTER to proceed between simulations
# Description:
# Starting dispersed in random positions, the swarm aggregates together arbitrarily to form
# a connected network. Consensus decision making is performed with this network fixed, making
# a collective decision of which loop the swarm should form. Then with the same network, role
# assignment is performed, assigning the target positions to each robot. After these two
# consensus processes are done, the swarm disperses and aggregates again, this time aiming to
# form a loop with robots at their designated positions. The climbing method is used to permute
# the robots on the loop. When the robots reach their target positions, they dynamically adjust
# the local shape so the loop deforms to the target one. The above steps are run repeatedly.
# the simulations that run consecutively
# Simulation 1: aggregate together to form a random network
# Simulation 2: consensus decision making of target loop shape
# Simulation 3: consensus role assignment for the loop shape
# Simulation 4: loop formation with designated role assignment
# Simulation 5: loop reshaping to chosen shape
# message relay in the role assignment
# Message transmission is simulated only in the role assignment. Message relay is implemented
# to make this simulation possible, so communication between robots becomes important and the
# individual steps of message transmission are simulated. The delay caused by communication is
# skipped in the other simulations.
# "seed" robot mechanism
# In simulation 1, a new mechanism is added to accelerate the aggregation. Previously, each
# robot in the swarm could start a new group with another robot. The result is that there are
# often too many small local groups spread out over the space, decreasing the chance of a
# large group being formed. The new method is assigning a certain number of robots to be
# "seed" robots. Only a seed robot can initialize the forming of a new group, so as to avoid
# too many local groups. The percentage of seed robots is a new parameter to study: a small
# percentage results in a less robust aggregation process, a large percentage in a slow one.
# When a robot travels almost parallel to the boundaries, it sometimes takes an unnecessarily
# long time to reach a group. To avoid that, every time a robot is bounced away by a wall, if
# the leaving direction is too close to perpendicular, a deviation angle is added to deviate
# the robot.
# 04/11/2018
# In the simulation of aggregating to a random network, the robots used to need only one
# connection to stay stable in the group. The result is that the final network topology is
# always tree-branch like: most robots are only serially connected. This topology is rated as
# low connectivity and makes the consensus processes take longer, although an unintended
# advantage is that robots are more easily caught by the tree network.
# Advised by Dr. Lee, it is better that the final network look like a triangle grid network.
# In this way the swarm robots will have more evenly distributed coverage over the space.
# To implement this: when in a group, if a robot has two or more connections, it moves to the
# destination calculated in the old way. If it has only one connection, it rotates
# counter-clockwise around that neighbor robot until it finds another neighbor (see the sketch
# after the imports below). Again, there is an exception when the group itself has only two
# members.
# 05/19/2018
# Change the color plan for the shape formation in simulations 1 and 4: use red for seed
# robots regardless of their state, black for dominant group robots, and grey for the rest.
# The same applies to simulation 1 in demo 2.
# As I later found out, it's not the color that makes a seed robot hard to distinguish, it's
# that the size of the dot is small.
from __future__ import print_function
import pygame
import sys, os, getopt, math, random
import numpy as np
import pickle # for storing variables
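
# (Illustrative sketch only, not used by the demo below: the header note about
# a single-connection robot rotating counter-clockwise around its one neighbor
# can be realized with a plain 2D rotation about the neighbor's position.)
def rotate_ccw_around(neighbor_pos, robot_pos, step_angle):
    offset = robot_pos - neighbor_pos
    c, s = math.cos(step_angle), math.sin(step_angle)
    rot = np.array([[c, -s], [s, c]])  # CCW rotation for positive step_angle
    return neighbor_pos + rot.dot(offset)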
swarm_size = 30 # default size of the swarm
manual_mode = False # manually press enter key to proceed between simulations
# read command line options
try:
opts, args = getopt.getopt(sys.argv[1:], 'n:', ['manual'])
except getopt.GetoptError as err:
print(str(err))
sys.exit()
for opt,arg in opts:
if opt == '-n':
swarm_size = int(arg)
elif opt == '--manual':
manual_mode = True
# conversion between physical and display world sizes
# To best display any robot swarm in an appropriately sized window, and to leave enough
# physical space for the robots to move around, the ratio from unit world size to unit
# display size is fixed. The desired physical space between robots when they do shape
# formation is also fixed. So a small swarm has a small physical world and a linearly small
# display window, and vice versa for a large swarm.
# If the size of the swarm is proportional to the side length of the world, the area of the
# world grows too fast. If the swarm size is proportional to the area of the world, then when
# the swarm grows large it can no longer fit in a line or circle formation. A compromise is
# to make the swarm size proportional to the side length raised to a power exponent between
# 1 and 2.
power_exponent = 1.3 # between 1.0 and 2.0
# the larger the parameter, the slower the window grows with swarm size; vice versa
# for converting from physical world to display world
pixels_per_length = 50 # this is to be fixed
# calculate world_side_coef from a desired screen size for 30 robots
def cal_world_side_coef():
desired_screen_size = 400 # desired screen size for 30 robots
desired_world_size = float(desired_screen_size) / pixels_per_length
return desired_world_size / pow(30, 1/power_exponent)
world_side_coef = cal_world_side_coef()
world_side_length = world_side_coef * pow(swarm_size, 1/power_exponent)
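# (Worked example: with the defaults above, swarm_size=30 gives
# world_side_length = 400/50 = 8.0 length units, i.e. exactly the desired
# 400-pixel window once converted at pixels_per_length.)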
world_size = (world_side_length, world_side_length) # square physical world
# screen size calculated from world size
screen_side_length = int(pixels_per_length * world_side_length)
screen_size = (screen_side_length, screen_side_length) # square display world
# formation configuration
comm_range = 0.65 # communication range in the world
desired_space_ratio = 0.8 # ratio of the desired space to the communication range
# should be larger than 1/1.414=0.71, to avoid connections crossing each other
desired_space = comm_range * desired_space_ratio
# deviate robot heading, so as to avoid robots travelling perpendicular to the walls
perp_thres = math.pi/18 # threshold, range from the perpendicular line
devia_angle = math.pi/9 # deviate this much angle from the perpendicular line
# consensus configuration
loop_folder = "loop-data2" # folder to store the loop shapes
shape_catalog = ["airplane", "circle", "cross", "goblet", "hand", "K", "lamp", "square",
"star", "triangle", "wrench"]
shape_quantity = len(shape_catalog) # the number of decisions
shape_decision = -1 # the index of chosen decision, in range(shape_quantity)
# also the index in shape_catalog
assignment_scheme = np.zeros(swarm_size)
# variable to force shape to different choices, for video recording
force_shape_set = range(shape_quantity)
# robot properties
robot_poses = np.random.rand(swarm_size, 2) * world_side_length # initialize the robot poses
dist_table = np.zeros((swarm_size, swarm_size)) # distances between robots
conn_table = np.zeros((swarm_size, swarm_size)) # connections between robots
# 0 for disconnected, 1 for connected
conn_lists = [[] for i in range(swarm_size)] # lists of robots connected
# function for all simulations, update the distances and connections between the robots
def dist_conn_update():
global dist_table
global conn_table
global conn_lists
conn_lists = [[] for i in range(swarm_size)] # empty the lists
for i in range(swarm_size):
for j in range(i+1, swarm_size):
dist_temp = np.linalg.norm(robot_poses[i] - robot_poses[j])
dist_table[i,j] = dist_temp
dist_table[j,i] = dist_temp
if dist_temp > comm_range:
conn_table[i,j] = 0
conn_table[j,i] = 0
else:
conn_table[i,j] = 1
conn_table[j,i] = 1
conn_lists[i].append(j)
conn_lists[j].append(i)
dist_conn_update() # update the distances and connections
disp_poses = [] # display positions
# function for all simulations, update the display positions
def disp_poses_update():
global disp_poses
poses_temp = robot_poses / world_side_length
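    # flip the y coordinate: pygame's screen y grows downward, while the
    # physical world's y grows upward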
poses_temp[:,1] = 1.0 - poses_temp[:,1]
poses_temp = poses_temp * screen_side_length
disp_poses = poses_temp.astype(int) # convert to int and assign to disp_poses
disp_poses_update()
# deciding the seed robots, used in simulations with moving robots
seed_percentage = 0.1 # the percentage of seed robots in the swarm
seed_quantity = min(max(int(swarm_size*seed_percentage), 1), swarm_size)
# no smaller than 1, and no larger than swarm_size
robot_seeds = [False for i in range(swarm_size)] # whether a robot is a seed robot
# only a seed robot can initialize the forming of a new group
seed_list_temp = np.arange(swarm_size)
np.random.shuffle(seed_list_temp)
for i in seed_list_temp[:seed_quantity]:
robot_seeds[i] = True
# visualization configuration
color_white = (255,255,255)
color_black = (0,0,0)
color_grey = (128,128,128)
color_red = (255,0,0)
# distinct_color_set = ((230,25,75), (60,180,75), (255,225,25), (0,130,200), (245,130,48),
# (145,30,180), (70,240,240), (240,50,230), (210,245,60), (250,190,190),
# (0,128,128), (230,190,255), (170,110,40), (255,250,200), (128,0,0),
# (170,255,195), (128,128,0), (255,215,180), (0,0,128), (128,128,128))
distinct_color_set = ((230,25,75), (60,180,75), (255,225,25), (0,130,200), (245,130,48),
(145,30,180), (70,240,240), (240,50,230), (210,245,60), (250,190,190),
(0,128,128), (230,190,255), (170,110,40), (128,0,0),
(170,255,195), (128,128,0), (0,0,128))
color_quantity = 17
# sizes for formation simulations
robot_size_formation = 5 # robot size in formation simulations
robot_width_empty = 2
conn_width_formation = 2 # connection line width in formation simulations
# sizes for consensus simulations
robot_size_consensus = 7 # robot size in consensus simulations
conn_width_thin_consensus = 2 # thin connection line in consensus simulations
conn_width_thick_consensus = 4 # thick connection line in consensus simulations
robot_ring_size = 9 # extra ring on robot in consensus simulations
# the sizes for formation and consensus simulations are set to same for visual consistency
# set up the simulation window
pygame.init()
font = pygame.font.SysFont("Cabin", 12)
icon = pygame.image.load("icon_geometry_art.jpg")
pygame.display.set_icon(icon)
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption("Demo 1")
# draw the network
screen.fill(color_white)
for i in range(swarm_size):
pygame.draw.circle(screen, color_black, disp_poses[i], robot_size_formation,
robot_width_empty)
# pygame.draw.circle(screen, color_black, disp_poses[i],
# int(comm_range*pixels_per_length), 1)
pygame.display.update()
# pause to check the network before the simulations, or for screen recording
raw_input("<Press Enter to continue>")
# function for simulation 1 and 4, group robots by their group ids, and find the largest group
def S14_robot_grouping(robot_list, robot_group_ids, groups):
# the input list 'robot_list' should not be empty
groups_temp = {} # key is group id, value is list of robots
for i in robot_list:
group_id_temp = robot_group_ids[i]
if group_id_temp not in groups_temp.keys():
groups_temp[group_id_temp] = [i]
else:
groups_temp[group_id_temp].append(i)
group_id_max = -1 # the group with most members
# regardless of only one group or multiple groups in groups_temp
if len(groups_temp.keys()) > 1: # there is more than one group
# find the largest group and disassemble the rest
group_id_max = groups_temp.keys()[0]
size_max = len(groups[group_id_max][0])
for group_id_temp in groups_temp.keys()[1:]:
size_temp = len(groups[group_id_temp][0])
if size_temp > size_max:
group_id_max = group_id_temp
size_max = size_temp
else: # only one group, automatically the largest one
group_id_max = groups_temp.keys()[0]
return groups_temp, group_id_max
# function for simulation 1 and 4, find the closest robot to a host robot
# use global variable "dist_table"
def S14_closest_robot(robot_host, robot_neighbors):
# "robot_host": the robot to measure distance from
# "robot_neighbors": a list of robots to be compared with
robot_closest = robot_neighbors[0]
dist_closest = dist_table[robot_host,robot_closest]
for i in robot_neighbors[1:]:
dist_temp = dist_table[robot_host,i]
if dist_temp < dist_closest:
robot_closest = i
dist_closest = dist_temp
return robot_closest
# general function to normalize a numpy vector
def normalize(v):
norm = np.linalg.norm(v)
if norm == 0:
return v
return v/norm
# general function to reset radian angle to [-pi, pi)
def reset_radian(radian):
while radian >= math.pi:
radian = radian - 2*math.pi
while radian < -math.pi:
radian = radian + 2*math.pi
return radian
# general function to steer robot away from wall if out of boundary (following physics)
# use global variable "world_side_length"
def robot_boundary_check(robot_pos, robot_ori):
new_ori = robot_ori
if robot_pos[0] >= world_side_length: # outside of right boundary
if math.cos(new_ori) > 0:
new_ori = reset_radian(2*(math.pi/2) - new_ori)
# further check if new angle is too much perpendicular
if new_ori > 0:
if (math.pi - new_ori) < perp_thres:
new_ori = new_ori - devia_angle
else:
if (new_ori + math.pi) < perp_thres:
new_ori = new_ori + devia_angle
elif robot_pos[0] <= 0: # outside of left boundary
if math.cos(new_ori) < 0:
new_ori = reset_radian(2*(math.pi/2) - new_ori)
if new_ori > 0:
if new_ori < perp_thres:
new_ori = new_ori + devia_angle
else:
if (-new_ori) < perp_thres:
new_ori = new_ori - devia_angle
if robot_pos[1] >= world_side_length: # outside of top boundary
if math.sin(new_ori) > 0:
new_ori = reset_radian(2*(0) - new_ori)
if new_ori > -math.pi/2:
if (new_ori + math.pi/2) < perp_thres:
new_ori = new_ori + devia_angle
else:
if (-math.pi/2 - new_ori) < perp_thres:
new_ori = new_ori - devia_angle
elif robot_pos[1] <= 0: # outside of bottom boundary
if math.sin(new_ori) < 0:
new_ori = reset_radian(2*(0) - new_ori)
if new_ori > math.pi/2:
if (new_ori - math.pi/2) < perp_thres:
new_ori = new_ori + devia_angle
else:
if (math.pi/2 - new_ori) < perp_thres:
new_ori = new_ori - devia_angle
return new_ori
# # general function to steer robot away from wall if out of boundary (in random direction)
# # use global variable "world_side_length"
# def robot_boundary_check(robot_pos, robot_ori):
# new_ori = robot_ori
# if robot_pos[0] >= world_side_length: # outside of right boundary
# if math.cos(new_ori) > 0:
# new_ori = reset_radian(math.pi/2 + np.random.uniform(0,math.pi))
# elif robot_pos[0] <= 0: # outside of left boundary
# if math.cos(new_ori) < 0:
# new_ori = reset_radian(-math.pi/2 + np.random.uniform(0,math.pi))
# if robot_pos[1] >= world_side_length: # outside of top boundary
# if math.sin(new_ori) > 0:
# new_ori = reset_radian(-math.pi + np.random.uniform(0,math.pi))
# elif robot_pos[1] <= 0: # outside of bottom boundary
# if math.sin(new_ori) < 0:
# new_ori = reset_radian(0 + np.random.uniform(0,math.pi))
# return new_ori
# main loop of the program that run the set of simulations infinitely
# this loop does not exit unless error thrown out or manually terminated from terminal
while True:
########### simulation 1: aggregate together to form a random network ###########
print("##### simulation 1: network aggregation #####")
    # (switching from using 'status' to using 'state': state here refers to being in one
    # condition out of many options, like whether in a group, or whether available for
    # connection. Status usually refers to a series of predefined stages, which go one
    # way from start to end, like the progress of a project, while my states may jump
    # back and forth. It's debatable which one to use, but 'state' is what I choose.)
    # robot properties
# all robots start with state '-1', wandering around and ignoring connections
robot_states = np.array([-1 for i in range(swarm_size)])
# '-1' for being single, moving around, not available for connection
# '0' for being single, moving around, available for connection
# '1' for in a group, adjust position for maintaining connections
n1_life_lower = 2 # inclusive
n1_life_upper = 6 # exclusive
robot_n1_lives = np.random.uniform(n1_life_lower, n1_life_upper, swarm_size)
robot_oris = np.random.rand(swarm_size) * 2 * math.pi - math.pi # in range of [-pi, pi)
# group properties
groups = {}
# key is the group id, value is a list, in the list:
# [0]: a list of robots in the group
# [1]: remaining life time of the group
# [2]: whether or not being the dominant group
life_incre = 5 # number of seconds added to the life of a group when new robot joins
group_id_upper = swarm_size # group id is integer randomly chosen in [0, group_id_upper)
robot_group_ids = np.array([-1 for i in range(swarm_size)]) # group id of the robots
# '-1' for not in a group
# use moving distance in each simulation step, instead of robot velocity
# so to make it independent of simulation frequency control
step_moving_dist = 0.05 # should be smaller than destination distance error
destination_error = 0.08
mov_vec_ratio = 0.5 # ratio used when calculating mov vector
# the loop for simulation 1
sim_haulted = False
time_last = pygame.time.get_ticks()
time_now = time_last
frame_period = 50
sim_freq_control = True
iter_count = 0
# sys.stdout.write("iteration {}".format(iter_count)) # did nothing in iteration 0
print("swarm robots are aggregating to one network ...")
swarm_aggregated = False
    ending_period = 3.0 # leave this much time to let robots settle after aggregation is done
# # progress bar
# prog_bar_len = 30
# prog_pos = 0
# prog_step = 1
# prog_period = 1000
# prog_update_freq = int(prog_period/frame_period)
# prog_counter = 0
# sys.stdout.write("[" + prog_pos*"-" + "#" + (prog_bar_len-(prog_pos+1))*"-" + "]\r")
# sys.stdout.flush()
while True:
# close window button to exit the entire program;
# space key to pause this simulation
for event in pygame.event.get():
if event.type == pygame.QUIT: # close window button is clicked
print("program exit in simulation 1")
sys.exit() # exit the entire program
if event.type == pygame.KEYUP:
if event.key == pygame.K_SPACE:
sim_haulted = not sim_haulted # reverse the pause flag
if sim_haulted: continue
# simulation frequency control
if sim_freq_control:
time_now = pygame.time.get_ticks()
if (time_now - time_last) > frame_period:
time_last = time_now
else:
continue
# increase iteration count
iter_count = iter_count + 1
# sys.stdout.write("\riteration {}".format(iter_count))
# sys.stdout.flush()
# # progress bar
# prog_counter = prog_counter + 1
# if prog_counter > prog_update_freq:
# prog_counter == 0
# if prog_pos == 0: prog_step = 1 # goes forward
# elif prog_pos == (prog_bar_len-1): prog_step = -1 # goes backward
# prog_pos = prog_pos + prog_step
# sys.stdout.write("[" + prog_pos*"-" + "#" +
# (prog_bar_len-(prog_pos+1))*"-" + "]\r")
# sys.stdout.flush()
# state transition variables
st_n1to0 = [] # robot '-1' gets back to '0' after life time ends
# list of robots changing to '0' from '-1'
        st_gton1 = [] # group disassembles when its life expires, or when triggered by others
# list of groups to be disassembled
st_0to1_join = {} # robot '0' detects robot '1' group, join the group
# key is the robot '0', value is the group id
st_0to1_new = {} # robot '0' detects another robot '0', forming new group
# key is the robot '0', value is the other neighbor robot '0'
# update the "relations" of the robots
dist_conn_update()
# check any state transition, and schedule the tasks
for i in range(swarm_size):
if robot_states[i] == -1: # for host robot with state '-1'
if robot_n1_lives[i] < 0:
st_n1to0.append(i) # life of '-1' ends, becoming '0'
else:
conn_temp = conn_lists[i][:] # a list of connections with only state '1'
for j in conn_lists[i]:
if robot_states[j] != 1:
conn_temp.remove(j)
if len(conn_temp) != 0:
groups_local, group_id_max = S14_robot_grouping(conn_temp,
robot_group_ids, groups)
                    # disassemble all groups except the largest one
for group_id_temp in groups_local.keys():
if (group_id_temp != group_id_max) and (group_id_temp not in st_gton1):
st_gton1.append(group_id_temp) # schedule to disassemble this group
# find the closest neighbor in groups_local[group_id_max]
robot_closest = S14_closest_robot(i, groups_local[group_id_max])
# change moving direction opposing the closest robot
vect_temp = robot_poses[i] - robot_poses[robot_closest]
robot_oris[i] = math.atan2(vect_temp[1], vect_temp[0])
elif robot_states[i] == 0: # for host robot with state '0'
state1_list = [] # list of state '1' robots in the connection list
                state0_list = [] # list of state '0' robots in the connection list
for j in conn_lists[i]:
# ignore state '-1' robots
if robot_states[j] == 1:
state1_list.append(j)
elif robot_states[j] == 0:
state0_list.append(j)
if len(state1_list) != 0:
# there is state '1' robot in the list, ignoring state '0' robot
groups_local, group_id_max = S14_robot_grouping(state1_list,
robot_group_ids, groups)
                    # disassemble all groups except the largest one
for group_id_temp in groups_local.keys():
if (group_id_temp != group_id_max) and (group_id_temp not in st_gton1):
st_gton1.append(group_id_temp) # schedule to disassemble this group
                    # join the group with the most members
st_0to1_join[i] = group_id_max
elif len(state0_list) != 0:
# there is no robot '1', but has robot '0'
# find the closest robot, schedule to start a new group with it
st_0to1_new[i] = S14_closest_robot(i, state0_list)
elif robot_states[i] == 1: # for host robot with state '1'
conn_temp = conn_lists[i][:] # a list of connections with only state '1'
has_other_group = False # whether there is robot '1' from other group
host_group_id = robot_group_ids[i] # group id of host robot
for j in conn_lists[i]:
if robot_states[j] != 1:
conn_temp.remove(j)
else:
if robot_group_ids[j] != host_group_id:
has_other_group = True
# disassemble the smaller groups
if has_other_group:
groups_local, group_id_max = S14_robot_grouping(conn_temp,
robot_group_ids, groups)
                    # disassemble all groups except the largest one
for group_id_temp in groups_local.keys():
if (group_id_temp != group_id_max) and (group_id_temp not in st_gton1):
st_gton1.append(group_id_temp) # schedule to disassemble this group
else: # to be tested and deleted
print("robot state error")
sys.exit()
# check the life time of the groups; if expired, schedule disassemble
for group_id_temp in groups.keys():
if groups[group_id_temp][1] < 0: # life time of a group ends
if group_id_temp not in st_gton1:
st_gton1.append(group_id_temp)
# process the scheduled state transitions, different transition has different priority
# 1.st_0to1_join, robot '0' joins a group, becomes '1'
for robot_temp in st_0to1_join.keys():
group_id_temp = st_0to1_join[robot_temp] # the id of the group to join
# update properties of the robot
robot_states[robot_temp] = 1
robot_group_ids[robot_temp] = group_id_temp
# update properties of the group
groups[group_id_temp][0].append(robot_temp)
groups[group_id_temp][1] = groups[group_id_temp][1] + life_incre
# 2.st_gton1
for group_id_temp in st_gton1:
for robot_temp in groups[group_id_temp][0]:
robot_states[robot_temp] = -1
robot_n1_lives[robot_temp] = np.random.uniform(n1_life_lower, n1_life_upper)
robot_group_ids[robot_temp] = -1
robot_oris[robot_temp] = np.random.rand() * 2 * math.pi - math.pi
groups.pop(group_id_temp)
# 3.st_0to1_new
while len(st_0to1_new.keys()) != 0:
pair0 = st_0to1_new.keys()[0]
pair1 = st_0to1_new[pair0]
st_0to1_new.pop(pair0)
if (pair1 in st_0to1_new.keys()) and (st_0to1_new[pair1] == pair0):
st_0to1_new.pop(pair1)
# only forming a group if there is at least one seed robot in the pair
if robot_seeds[pair0] or robot_seeds[pair1]:
# forming new group for robot pair0 and pair1
group_id_temp = np.random.randint(0, group_id_upper)
while group_id_temp in groups.keys():
group_id_temp = np.random.randint(0, group_id_upper)
# update properties of the robots
robot_states[pair0] = 1
robot_states[pair1] = 1
robot_group_ids[pair0] = group_id_temp
robot_group_ids[pair1] = group_id_temp
# update properties of the group
groups[group_id_temp] = [[pair0, pair1], life_incre*2, False]
# 4.st_n1to0
for robot_temp in st_n1to0:
robot_states[robot_temp] = 0
# check if a group becomes dominant
for group_id_temp in groups.keys():
if len(groups[group_id_temp][0]) > swarm_size/2.0:
groups[group_id_temp][2] = True
else:
groups[group_id_temp][2] = False
# local connection lists for state '1' robots
local_conn_lists = [[] for i in range(swarm_size)] # connections in same group
robot_poses_t = np.copy(robot_poses) # as old poses
# update the physics
for i in range(swarm_size):
# change move direction only for robot '1', for adjusting location in group
if robot_states[i] == 1:
# find the neighbors in the same group
host_group_id = robot_group_ids[i]
for j in conn_lists[i]:
if (robot_states[j] == 1) and (robot_group_ids[j] == host_group_id):
local_conn_lists[i].append(j)
if len(local_conn_lists[i]) == 0: # should not happen after parameter tuning
                    print("robot {} loses its group {}".format(i, host_group_id))
sys.exit()
# calculating the moving direction, based on neighbor situation
                if (len(local_conn_lists[i]) == 1) and (len(groups[host_group_id][0]) > 2):
                    # If the robot has only one neighbor, and the group has more than two
                    # members, then the robot will try to secure another neighbor by
                    # rotating counter-clockwise around this only neighbor.
center = local_conn_lists[i][0] # the center robot
# use the triangle of (desired_space, dist_table[i,center], step_moving_dist)
if dist_table[i,center] > (desired_space + step_moving_dist):
# moving toward the center robot
robot_oris[i] = math.atan2(robot_poses_t[center,1] - robot_poses_t[i,1],
robot_poses_t[center,0] - robot_poses_t[i,0])
elif (dist_table[i,center] + step_moving_dist) < desired_space:
# moving away from the center robot
robot_oris[i] = math.atan2(robot_poses_t[i,1] - robot_poses_t[center,1],
robot_poses_t[i,0] - robot_poses_t[center,0])
else:
# moving tangent along the circle of radius of "desired_space"
robot_oris[i] = math.atan2(robot_poses_t[i,1] - robot_poses_t[center,1],
robot_poses_t[i,0] - robot_poses_t[center,0])
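                        # law of cosines on the triangle with side lengths dist_table[i,center],
                        # step_moving_dist and desired_space: the heading below makes one step
                        # land back on the circle of radius desired_space around the center robot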
# interior angle between dist_table[i,center] and step_moving_dist
                        # (np.clip keeps the cosine inside acos's domain despite rounding error)
                        int_angle_temp = math.acos(np.clip((math.pow(dist_table[i,center],2) +
                            math.pow(step_moving_dist,2) - math.pow(desired_space,2)) /
                            (2.0*dist_table[i,center]*step_moving_dist), -1.0, 1.0))
robot_oris[i] = reset_radian(robot_oris[i] + (math.pi - int_angle_temp))
else: # the normal situation
# calculate the moving vector, and check if destination is within error range
mov_vec = np.zeros(2)
for j in local_conn_lists[i]: # accumulate the influence from all neighbors
mov_vec = mov_vec + (mov_vec_ratio * (dist_table[i,j] - desired_space) *
normalize(robot_poses_t[j] - robot_poses_t[i]))
if np.linalg.norm(mov_vec) < destination_error:
continue # skip the physics update if within destination error
else:
robot_oris[i] = math.atan2(mov_vec[1], mov_vec[0]) # change direction
# check if out of boundaries
robot_oris[i] = robot_boundary_check(robot_poses_t[i], robot_oris[i])
# update one step of move
robot_poses[i] = robot_poses_t[i] + (step_moving_dist *
np.array([math.cos(robot_oris[i]), math.sin(robot_oris[i])]))
# update the graphics
disp_poses_update()
screen.fill(color_white)
# draw the robots of states '-1' and '0'
for i in range(swarm_size):
if robot_seeds[i]:
color_temp = color_red
else:
color_temp = color_grey
if robot_states[i] == -1: # empty circle for state '-1' robot
pygame.draw.circle(screen, color_temp, disp_poses[i],
robot_size_formation, robot_width_empty)
elif robot_states[i] == 0: # full circle for state '0' robot
pygame.draw.circle(screen, color_temp, disp_poses[i],
robot_size_formation, 0)
# draw the in-group robots by each group
for group_id_temp in groups.keys():
if groups[group_id_temp][2]:
# highlight the dominant group with black color
color_group = color_black
else:
color_group = color_grey
conn_draw_sets = [] # avoid draw same connection two times
# draw the robots and connections in the group
for i in groups[group_id_temp][0]:
for j in local_conn_lists[i]:
if set([i,j]) not in conn_draw_sets:
pygame.draw.line(screen, color_group, disp_poses[i],
disp_poses[j], conn_width_formation)
conn_draw_sets.append(set([i,j]))
# draw robots in the group
if robot_seeds[i]: # force color red for seed robot
pygame.draw.circle(screen, color_red, disp_poses[i],
robot_size_formation, 0)
else:
pygame.draw.circle(screen, color_group, disp_poses[i],
robot_size_formation, 0)
pygame.display.update()
# reduce life time of robot '-1' and groups
for i in range(swarm_size):
if robot_states[i] == -1:
robot_n1_lives[i] = robot_n1_lives[i] - frame_period/1000.0
for group_id_temp in groups.keys():
if not groups[group_id_temp][2]: # skip dominant group
groups[group_id_temp][1] = groups[group_id_temp][1] - frame_period/1000.0
# check exit condition of simulation 1
if not swarm_aggregated:
if (len(groups.keys()) == 1) and (len(groups.values()[0][0]) == swarm_size):
swarm_aggregated = True # once aggregated, there is no turning back
if swarm_aggregated:
if ending_period <= 0:
print("simulation 1 is finished")
if manual_mode: raw_input("<Press Enter to continue>")
print("") # empty line
break
else:
ending_period = ending_period - frame_period/1000.0
# # store the variable "robot_poses"
# # tmp_filepath = os.path.join('tmp', 'demo1_30_robot_poses')
# tmp_filepath = os.path.join('tmp', 'demo1_100_robot_poses')
# with open(tmp_filepath, 'w') as f:
# pickle.dump(robot_poses, f)
# raw_input("<Press Enter to continue>")
# break
########### simulation 2: consensus decision making of target loop shape ###########
# # restore variable "robot_poses"
# # tmp_filepath = os.path.join('tmp', 'demo1_30_robot_poses')
# tmp_filepath = os.path.join('tmp', 'demo1_100_robot_poses')
# with open(tmp_filepath) as f:
# robot_poses = pickle.load(f)
print("##### simulation 2: decision making #####")
# "dist" in the variable may also refer to distribution
dist_conn_update() # need to update the network only once
# draw the network first time
disp_poses_update()
screen.fill(color_white)
for i in range(swarm_size):
pygame.draw.circle(screen, color_black, disp_poses[i], robot_size_consensus, 0)
for j in range(i+1, swarm_size):
if conn_table[i,j]:
pygame.draw.line(screen, color_black, disp_poses[i], disp_poses[j],
conn_width_thin_consensus)
pygame.display.update()
# initialize the decision making variables
shape_decision = -1
deci_dist = np.random.rand(swarm_size, shape_quantity)
sum_temp = np.sum(deci_dist, axis=1)
for i in range(swarm_size):
deci_dist[i] = deci_dist[i] / sum_temp[i]
deci_domi = np.argmax(deci_dist, axis=1)
groups = [] # robots reach local consensus are in same group
robot_group_sizes = [0 for i in range(swarm_size)] # group size for each robot
# color assignments for the robots and decisions
color_initialized = False # whether color assignment has been done for the first time
deci_colors = [-1 for i in range(shape_quantity)]
color_assigns = [0 for i in range(color_quantity)]
group_colors = []
robot_colors = [0 for i in range(swarm_size)]
# decision making control variables
dist_diff_thres = 0.3
dist_diff_ratio = [0.0 for i in range(swarm_size)]
dist_diff_power = 0.3
# the loop for simulation 2
sim_haulted = False
time_last = pygame.time.get_ticks()
time_now = time_last
frame_period = 1000
sim_freq_control = True
iter_count = 0
sys.stdout.write("iteration {}".format(iter_count))
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT: # close window button is clicked
print("program exit in simulation 2")
sys.exit() # exit the entire program
if event.type == pygame.KEYUP:
if event.key == pygame.K_SPACE:
sim_haulted = not sim_haulted # reverse the pause flag
if sim_haulted: continue
# simulation frequency control
if sim_freq_control:
time_now = pygame.time.get_ticks()
if (time_now - time_last) > frame_period:
time_last = time_now
else:
continue
# increase iteration count
iter_count = iter_count + 1
sys.stdout.write("\riteration {}".format(iter_count))
sys.stdout.flush()
# 1.update the dominant decision for all robots
deci_domi = np.argmax(deci_dist, axis=1)
# 2.update the groups
groups = [] # empty the group container
group_deci = [] # the exhibited decision of the groups
robot_pool = range(swarm_size) # a robot pool, to be assigned into groups
while len(robot_pool) != 0: # searching groups one by one from the global robot pool
# start a new group, with first robot in the robot_pool
first_member = robot_pool[0] # first member of this group
group_temp = [first_member] # current temporary group
robot_pool.pop(0) # pop out first robot in the pool
# a list of potential members for current group
# this list may increase when new members of group are discovered
p_members = list(conn_lists[first_member])
# an index for iterating through p_members, in searching group members
p_index = 0 # if it climbs to the end, the searching ends
# index of dominant decision for current group
current_domi = deci_domi[first_member]
# dynamically iterating through p_members with p_index
while p_index < len(p_members): # index still in valid range
if deci_domi[p_members[p_index]] == current_domi:
# a new member has been found
new_member = p_members[p_index] # get index of the new member
p_members.remove(new_member) # remove it from p_members list
                    # p_index is not increased, because the removal shifts the next value into its place
robot_pool.remove(new_member) # remove it from the global robot pool
group_temp.append(new_member) # add it to current group
# check if new potential members are available, due to new robot discovery
p_members_new = conn_lists[new_member] # new potential members
for member in p_members_new:
if member not in p_members: # should not already in p_members
if member not in group_temp: # should not in current group
if member in robot_pool: # should be available in global pool
# if conditions satisfied, it is qualified as a potential member
p_members.append(member) # append at the end
else:
                    # a boundary robot (sharing a different decision) has been met;
                    # leave it in p_members, so it will not be added and checked again
p_index = p_index + 1 # shift right one position
# all connected members for this group have been located
groups.append(group_temp) # append the new group
group_deci.append(deci_domi[first_member]) # append new group's exhibited decision
# update the colors for the exhibited decisions
if not color_initialized:
color_initialized = True
select_set = range(color_quantity) # the initial selecting set
all_deci_set = set(group_deci) # put all exhibited decisions in a set
for deci in all_deci_set: # avoid checking duplicate decisions
if len(select_set) == 0:
select_set = range(color_quantity) # start a new set to select from
chosen_color = np.random.choice(select_set)
select_set.remove(chosen_color)
deci_colors[deci] = chosen_color # assign the chosen color to decision
# increase the assignments of chosen color by 1
color_assigns[chosen_color] = color_assigns[chosen_color] + 1
else:
# remove the color for a decision, if it's no longer the decision of any group
all_deci_set = set(group_deci)
for i in range(shape_quantity):
if deci_colors[i] != -1: # there was a color assigned before
if i not in all_deci_set:
# decrease the assignments of chosen color by 1
color_assigns[deci_colors[i]] = color_assigns[deci_colors[i]] - 1
deci_colors[i] = -1 # remove the assigned color
# assign color for an exhibited decision if not assigned
select_set = [] # set of colors to select from, start from empty
for i in range(len(groups)):
if deci_colors[group_deci[i]] == -1:
if len(select_set) == 0:
# construct a new select_set
color_assigns_min = min(color_assigns)
color_assigns_temp = [j - color_assigns_min for j in color_assigns]
select_set = range(color_quantity)
for j in range(color_quantity):
if color_assigns_temp[j] != 0:
select_set.remove(j)
chosen_color = np.random.choice(select_set)
select_set.remove(chosen_color)
deci_colors[group_deci[i]] = chosen_color # assign the chosen color
# increase the assignments of chosen color by 1
color_assigns[chosen_color] = color_assigns[chosen_color] + 1
# update the colors for the groups
group_colors = []
for i in range(len(groups)):
group_colors.append(deci_colors[group_deci[i]])
# 3.update the group size for each robot
for i in range(len(groups)):
size_temp = len(groups[i])
color_temp = group_colors[i]
for robot in groups[i]:
robot_group_sizes[robot] = size_temp
robot_colors[robot] = color_temp # update the color for each robot
# the decision distribution evolution
converged_all = True # flag for convergence of entire network
deci_dist_t = np.copy(deci_dist) # deep copy of the 'deci_dist'
for i in range(swarm_size):
host_domi = deci_domi[i]
converged = True
for neighbor in conn_lists[i]:
if host_domi != deci_domi[neighbor]:
converged = False
break
# action based on convergence of dominant decision
if converged: # all neighbors have converged with host
# step 1: take equally weighted average on all distributions
# including host and all neighbors
deci_dist[i] = deci_dist_t[i]*1.0 # start with host itself
for neighbor in conn_lists[i]:
# accumulate neighbor's distribution
deci_dist[i] = deci_dist[i] + deci_dist_t[neighbor]
# normalize the distribution such that sum is 1.0
sum_temp = np.sum(deci_dist[i])
deci_dist[i] = deci_dist[i] / sum_temp
# step 2: increase the unipolarity by applying the linear multiplier
# (this step is only for when all neighbors are converged)
# First find the largest difference between two distributions in this block
# of robots, including the host and all its neighbors.
comb_pool = [i] + list(conn_lists[i]) # add host to a pool with its neighbors
# will be used to form combinations from this pool
comb_pool_len = len(comb_pool)
dist_diff = []
for j in range(comb_pool_len):
for k in range(j+1, comb_pool_len):
j_robot = comb_pool[j]
k_robot = comb_pool[k]
dist_diff.append(np.sum(abs(deci_dist[j_robot] - deci_dist[k_robot])))
dist_diff_max = max(dist_diff) # maximum distribution difference of all
if dist_diff_max < dist_diff_thres:
# distribution difference is small enough,
# that linear multiplier should be applied to increase unipolarity
dist_diff_ratio = dist_diff_max/dist_diff_thres
                        # The linear multiplier ramps from a smaller end to a larger end;
                        # the smaller end is positively related to dist_diff_ratio. The
                        # smaller the maximum distribution difference, the smaller
                        # dist_diff_ratio, and the steeper the linear multiplier.
# '1.0/shape_quantity' is the average value of the linear multiplier
small_end = 1.0/shape_quantity * np.power(dist_diff_ratio, dist_diff_power)
large_end = 2.0/shape_quantity - small_end
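                        # e.g. with shape_quantity = 5 and dist_diff_ratio -> 0: small_end -> 0
                        # and large_end -> 0.4, so relative to the neutral multiplier 0.2 the
                        # weakest choice is almost zeroed and the strongest doubled; the
                        # multipliers average to 1.0/shape_quantity, keeping the distribution
                        # sum near 1.0 before the renormalization below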
# sort the magnitude of the current distribution
dist_temp = np.copy(deci_dist[i]) # temporary distribution
sort_index = range(shape_quantity)
for j in range(shape_quantity-1): # bubble sort, ascending order
for k in range(shape_quantity-1-j):
if dist_temp[k] > dist_temp[k+1]:
# exchange values in 'dist_temp'
temp = dist_temp[k]
dist_temp[k] = dist_temp[k+1]
dist_temp[k+1] = temp
# exchange values in 'sort_index'
temp = sort_index[k]
sort_index[k] = sort_index[k+1]
sort_index[k+1] = temp
# applying the linear multiplier
for j in range(shape_quantity):
multiplier = small_end + float(j)/(shape_quantity-1) * (large_end-small_end)
deci_dist[i][sort_index[j]] = deci_dist[i][sort_index[j]] * multiplier
# normalize the distribution such that sum is 1.0
sum_temp = np.sum(deci_dist[i])
deci_dist[i] = deci_dist[i]/sum_temp
else:
# not applying linear multiplier when distribution difference is large
pass
else: # at least one neighbor has different opinion with host
converged_all = False # the network is not converged
# take unequal weights in the averaging process based on group sizes
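                # (robots in larger locally-agreeing groups pull harder on the average,
                # so minority opinions are gradually absorbed by the majority)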
deci_dist[i] = deci_dist_t[i]*robot_group_sizes[i] # start with host itself
for neighbor in conn_lists[i]:
# accumulate neighbor's distribution
deci_dist[i] = deci_dist[i] + deci_dist_t[neighbor]*robot_group_sizes[neighbor]
# normalize the distribution
sum_temp = np.sum(deci_dist[i])
deci_dist[i] = deci_dist[i] / sum_temp
# update the graphics
screen.fill(color_white)
        # draw the regular connecting lines
for i in range(swarm_size):
for j in range(i+1, swarm_size):
if conn_table[i,j]:
pygame.draw.line(screen, color_black, disp_poses[i], disp_poses[j],
conn_width_thin_consensus)
# draw the connecting lines marking the groups
for i in range(len(groups)):
group_len = len(groups[i])
for j in range(group_len):
for k in range(j+1, group_len):
j_robot = groups[i][j]
k_robot = groups[i][k]
# check if two robots in one group is connected
if conn_table[j_robot,k_robot]:
pygame.draw.line(screen, distinct_color_set[group_colors[i]],
disp_poses[j_robot], disp_poses[k_robot], conn_width_thick_consensus)
# draw the robots as dots
for i in range(swarm_size):
pygame.draw.circle(screen, distinct_color_set[robot_colors[i]],
disp_poses[i], robot_size_consensus, 0)
pygame.display.update()
# check exit condition for simulations 2
if converged_all:
shape_decision = deci_domi[0]
print("") # move cursor to the new line
print("converged to decision {}: {}".format(shape_decision,
shape_catalog[shape_decision]))
print("simulation 2 is finished")
if manual_mode: raw_input("<Press Enter to continue>")
print("") # empty line
break
########### simulation 3: consensus role assignment for the loop shape ###########
print("##### simulation 3: role assignment #####")
dist_conn_update() # need to update the network only once
# draw the network first time
disp_poses_update()
screen.fill(color_white)
for i in range(swarm_size):
pygame.draw.circle(screen, color_black, disp_poses[i], robot_size_consensus, 0)
for j in range(i+1, swarm_size):
if conn_table[i,j]:
pygame.draw.line(screen, color_black, disp_poses[i], disp_poses[j],
conn_width_thin_consensus)
pygame.display.update()
# calculate the gradient map for message transmission
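    # (a breadth-first flood per message source: gradients[i,j] ends up as the hop
    # count from robot i to robot j, pool_conn[source] holds the expanding frontier
    # of that source, and a source is retired once its frontier is empty)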
gradients = np.copy(conn_table) # build gradient map on connection map
pool_gradient = 1 # gradients of the connections in the pool
pool_conn = {}
for i in range(swarm_size):
pool_conn[i] = conn_lists[i][:] # start with gradient 1 connections
while len(pool_conn.keys()) != 0:
source_deactivate = []
for source in pool_conn:
targets_temp = [] # the new targets
for target in pool_conn[source]:
for target_new in conn_lists[target]:
if target_new == source: continue # skip itself
if gradients[source, target_new] == 0:
gradients[source, target_new] = pool_gradient + 1
targets_temp.append(target_new)
if len(targets_temp) == 0:
source_deactivate.append(source)
else:
pool_conn[source] = targets_temp[:] # update with new targets
for source in source_deactivate:
pool_conn.pop(source) # remove the finished sources
pool_gradient = pool_gradient + 1
# calculate the relative gradient values
gradients_rel = []
# gradients_rel[i][j,k] refers to gradient of k relative to j with message source i
for i in range(swarm_size): # message source i
gradient_temp = np.zeros((swarm_size, swarm_size))
for j in range(swarm_size): # in the view point of j
gradient_temp[j] = gradients[i] - gradients[i,j]
gradients_rel.append(gradient_temp)
# list the neighbors a robot can send message to regarding a message source
neighbors_send = [[[] for j in range(swarm_size)] for i in range(swarm_size)]
# neighbors_send[i][j][k] means, if message from source i is received in j,
# it should be send to k
for i in range(swarm_size): # message source i
for j in range(swarm_size): # in the view point of j
for neighbor in conn_lists[j]:
if gradients_rel[i][j,neighbor] == 1:
neighbors_send[i][j].append(neighbor)
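    # (a robot thus relays a source's message only to neighbors that are one hop
    # farther from that source, so messages flood outward along shortest paths
    # without echoing back toward the source)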
# initialize the role assignment variables
# preference distribution of all robots
pref_dist = np.random.rand(swarm_size, swarm_size) # no need to normalize it
initial_roles = np.argmax(pref_dist, axis=1) # the chosen role
# the local assignment information
local_role_assignment = [[[-1, 0, -1] for j in range(swarm_size)] for i in range(swarm_size)]
# local_role_assignment[i][j] is local assignment information of robot i for robot j
# first number is chosen role, second is probability, third is time stamp
local_robot_assignment = [[[] for j in range(swarm_size)] for i in range(swarm_size)]
# local_robot_assignment[i][j] is local assignment of robot i for role j
# contains a list of robots that choose role j
# populate the chosen role of itself to the local assignment information
for i in range(swarm_size):
local_role_assignment[i][i][0] = initial_roles[i]
local_role_assignment[i][i][1] = pref_dist[i, initial_roles[i]]
local_role_assignment[i][i][2] = 0
local_robot_assignment[i][initial_roles[i]].append(i)
# received message container for all robots
message_rx = [[] for i in range(swarm_size)]
    # for each message entry, it contains:
# message[0]: ID of message source
# message[1]: its preferred role
# message[2]: probability of chosen role
# message[3]: time stamp
# all robots transmit once their chosen role before the loop
transmission_total = 0 # count message transmissions for each iteration
iter_count = 0 # also used as time stamp in message
for source in range(swarm_size):
chosen_role = local_role_assignment[source][source][0]
message_temp = [source, chosen_role, pref_dist[source, chosen_role], iter_count]
for target in conn_lists[source]: # send to all neighbors
message_rx[target].append(message_temp)
transmission_total = transmission_total + 1
role_color = [0 for i in range(swarm_size)] # colors for a conflicting role
    # Dynamically managing colors for conflicting robots is unnecessarily complicated;
    # it is simpler to just assign the colors in advance.
role_index_pool = range(swarm_size)
random.shuffle(role_index_pool)
color_index_pool = range(color_quantity)
random.shuffle(color_index_pool)
while len(role_index_pool) != 0:
role_color[role_index_pool[0]] = color_index_pool[0]
role_index_pool.pop(0)
color_index_pool.pop(0)
if len(color_index_pool) == 0:
color_index_pool = range(color_quantity)
random.shuffle(color_index_pool)
# flags
transmit_flag = [[False for j in range(swarm_size)] for i in range(swarm_size)]
# whether robot i should transmit received message of robot j
change_flag = [False for i in range(swarm_size)]
# whether robot i should change its chosen role
scheme_converged = [False for i in range(swarm_size)]
# the loop for simulation 3
sim_haulted = False
time_last = pygame.time.get_ticks()
time_now = time_last
time_period = 2000 # not frame_period
sim_freq_control = True
flash_delay = 200
sys.stdout.write("iteration {}".format(iter_count))
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT: # close window button is clicked
print("program exit in simulation 3")
sys.exit() # exit the entire program
if event.type == pygame.KEYUP:
if event.key == pygame.K_SPACE:
sim_haulted = not sim_haulted # reverse the pause flag
if sim_haulted: continue
# simulation frequency control
if sim_freq_control:
time_now = pygame.time.get_ticks()
if (time_now - time_last) > time_period:
time_last = time_now
else:
continue
# increase iteration count
iter_count = iter_count + 1
sys.stdout.write("\riteration {}".format(iter_count))
sys.stdout.flush()
# process the received messages
# transfer messages to the processing buffer, then empty the message receiver
message_rx_buf = [[[k for k in j] for j in i] for i in message_rx]
message_rx = [[] for i in range(swarm_size)]
yield_robots = [] # the robots that are yielding on chosen roles
yield_roles = [] # the old roles of yield_robots before yielding
for i in range(swarm_size): # messages received by robot i
for message in message_rx_buf[i]:
source = message[0]
role = message[1]
probability = message[2]
time_stamp = message[3]
if source == i:
print("error, robot {} receives message of itself".format(i))
sys.exit()
if time_stamp > local_role_assignment[i][source][2]:
# received message will only take any effect if time stamp is new
# update local_robot_assignment
role_old = local_role_assignment[i][source][0]
if role_old >= 0: # has been initialized before, not -1
local_robot_assignment[i][role_old].remove(source)
local_robot_assignment[i][role].append(source)
# update local_role_assignment
local_role_assignment[i][source][0] = role
local_role_assignment[i][source][1] = probability
local_role_assignment[i][source][2] = time_stamp
transmit_flag[i][source] = True
# check conflict with itself
if role == local_role_assignment[i][i][0]:
if probability >= pref_dist[i, local_role_assignment[i][i][0]]:
# change its choice after all message received
change_flag[i] = True
yield_robots.append(i)
yield_roles.append(local_role_assignment[i][i][0])
# change the choice of role for those decide to
for i in range(swarm_size):
if change_flag[i]:
change_flag[i] = False
role_old = local_role_assignment[i][i][0]
pref_dist_temp = np.copy(pref_dist[i])
pref_dist_temp[local_role_assignment[i][i][0]] = -1
# set to negative to avoid being chosen
for j in range(swarm_size):
if len(local_robot_assignment[i][j]) != 0:
# eliminate those choices that have been taken
pref_dist_temp[j] = -1
role_new = np.argmax(pref_dist_temp)
if pref_dist_temp[role_new] < 0:
print("error, robot {} has no available role".format(i))
sys.exit()
# role_new is good to go
# update local_robot_assignment
local_robot_assignment[i][role_old].remove(i)
local_robot_assignment[i][role_new].append(i)
# update local_role_assignment
local_role_assignment[i][i][0] = role_new
local_role_assignment[i][i][1] = pref_dist[i][role_new]
local_role_assignment[i][i][2] = iter_count
transmit_flag[i][i] = True
# transmit the received messages or initial new message transmission
transmission_total = 0
for transmitter in range(swarm_size): # transmitter robot
for source in range(swarm_size): # message is for this source robot
if transmit_flag[transmitter][source]:
transmit_flag[transmitter][source] = False
message_temp = [source, local_role_assignment[transmitter][source][0],
local_role_assignment[transmitter][source][1],
local_role_assignment[transmitter][source][2]]
for target in neighbors_send[source][transmitter]:
message_rx[target].append(message_temp)
transmission_total = transmission_total + 1
# check if role assignment scheme is converged at every robot
for i in range(swarm_size):
if not scheme_converged[i]:
converged = True
for j in range(swarm_size):
if len(local_robot_assignment[i][j]) != 1:
converged = False
break
if converged:
scheme_converged[i] = True
        # for display, scan the robots that have detected conflict but are not yielding
persist_robots = []
for i in range(swarm_size):
if i in yield_robots: continue
if len(local_robot_assignment[i][local_role_assignment[i][i][0]]) > 1:
persist_robots.append(i)
# update the display
for i in range(swarm_size):
for j in range(i+1, swarm_size):
if conn_table[i,j]:
pygame.draw.line(screen, color_black, disp_poses[i], disp_poses[j],
conn_width_thin_consensus)
for i in range(swarm_size):
pygame.draw.circle(screen, color_black, disp_poses[i], robot_size_consensus, 0)
# draw the persisting robots with color of conflicting role
for i in persist_robots:
pygame.draw.circle(screen, distinct_color_set[role_color[local_role_assignment[i][i][0]]],
disp_poses[i], robot_size_consensus, 0)
# draw extra ring on robot if local scheme has converged
for i in range(swarm_size):
if scheme_converged[i]:
pygame.draw.circle(screen, color_black, disp_poses[i],
robot_ring_size, 1)
pygame.display.update()
# flash the yielding robots with color of old role
for _ in range(3):
# change to color
for i in range(len(yield_robots)):
pygame.draw.circle(screen, distinct_color_set[role_color[yield_roles[i]]],
disp_poses[yield_robots[i]], robot_size_consensus, 0)
pygame.display.update()
pygame.time.delay(flash_delay)
# change to black
for i in range(len(yield_robots)):
pygame.draw.circle(screen, color_black,
disp_poses[yield_robots[i]], robot_size_consensus, 0)
pygame.display.update()
pygame.time.delay(flash_delay)
# exit the simulation if all role assignment schemes have converged
all_converged = scheme_converged[0]
for i in range(1, swarm_size):
all_converged = all_converged and scheme_converged[i]
if not all_converged: break
if all_converged:
for i in range(swarm_size):
assignment_scheme[i] = local_role_assignment[0][i][0]
print("") # move cursor to the new line
print("simulation 3 is finished")
if manual_mode: raw_input("<Press Enter to continue>")
print("") # empty line
break
# # store the variable "assignment_scheme"
# # tmp_filepath = os.path.join('tmp', 'demo1_30_assignment_scheme')
# tmp_filepath = os.path.join('tmp', 'demo1_100_assignment_scheme')
# with open(tmp_filepath, 'w') as f:
# pickle.dump(assignment_scheme, f)
# raw_input("<Press Enter to continue>")
# break
########### simulation 4: loop formation with designated role assignment ###########
# # restore variable "assignment_scheme"
# # tmp_filepath = os.path.join('tmp', 'demo1_30_assignment_scheme')
# tmp_filepath = os.path.join('tmp', 'demo1_100_assignment_scheme')
# with open(tmp_filepath) as f:
# assignment_scheme = pickle.load(f)
print("##### simulation 4: loop formation #####")
    # robot properties
# all robots start with state '-1'
robot_states = np.array([-1 for i in range(swarm_size)])
# '-1' for wandering around, ignoring all connections
# '0' for wandering around, available to connection
# '1' for in a group, order on loop not satisfied, climbing CCW around loop
# '2' for in a group, order on loop satisfied
n1_life_lower = 2 # inclusive
n1_life_upper = 6 # exclusive
robot_n1_lives = np.random.uniform(n1_life_lower, n1_life_upper, swarm_size)
robot_oris = np.random.rand(swarm_size) * 2 * math.pi - math.pi # in range of [-pi, pi)
robot_key_neighbors = [[] for i in range(swarm_size)] # key neighbors for robot on loop
# for state '1' robot: the robot that it is climbing around
# for state '2' robot: the left and right neighbors in serial connection on the loop
        # exception: if the group has only two members, the key neighbor is a single robot
# group properties
groups = {}
# key is the group id, value is a list, in the list:
# [0]: a list of robots in the group, both state '1' and '2'
# [1]: remaining life time of the group
# [2]: whether or not being the dominant group
life_incre = 5 # number of seconds added to the life of a group when new robot joins
group_id_upper = swarm_size # upper limit of group id
robot_group_ids = np.array([-1 for i in range(swarm_size)]) # group id for the robots
# '-1' for not in a group
# use step moving distance in each update, instead of calculating from robot velocity
# so to make it independent of simulation frequency control
step_moving_dist = 0.05 # should be smaller than destination distance error
destination_error = 0.1
mov_vec_ratio = 0.5 # ratio used when calculating mov vector
# spring constants in SMA
linear_const = 1.0
bend_const = 0.8
disp_coef = 0.5
# for avoiding space too small on loop
space_good_thres = desired_space * 0.85
# the loop for simulation 4
sim_haulted = False
time_last = pygame.time.get_ticks()
time_now = time_last
frame_period = 50
sim_freq_control = True
iter_count = 0
# sys.stdout.write("iteration {}".format(iter_count)) # did nothing in iteration 0
print("swarm robots are forming an ordered loop ...")
loop_formed = False
ending_period = 1.0 # grace period
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT: # close window button is clicked
print("program exit in simulation 4")
sys.exit() # exit the entire program
if event.type == pygame.KEYUP:
if event.key == pygame.K_SPACE:
sim_haulted = not sim_haulted # reverse the pause flag
if sim_haulted: continue
# simulation frequency control
if sim_freq_control:
time_now = pygame.time.get_ticks()
if (time_now - time_last) > frame_period:
time_last = time_now
else:
continue
# increase iteration count
iter_count = iter_count + 1
# sys.stdout.write("\riteration {}".format(iter_count))
# sys.stdout.flush()
# state transition variables
st_n1to0 = [] # robot '-1' gets back to '0' after life time ends
# list of robots changing to '0' from '-1'
        st_gton1 = [] # group disassembles when its life expires, or when triggered by others
# list of groups to be disassembled
st_0to1 = {} # robot '0' detects robot '2', join its group
# key is the robot '0', value is the group id
st_0to2 = {} # robot '0' detects another robot '0', forming a new group
# key is the robot '0', value is the other neighbor robot '0'
st_1to2 = {} # robot '1' is climbing around the loop, and finds its right position
# key is the left side of the slot, value is a list of robots intend to join
dist_conn_update() # update "relations" of the robots
# check state transitions, and schedule the tasks
for i in range(swarm_size):
if robot_states[i] == -1: # for host robot with state '-1'
if robot_n1_lives[i] < 0:
st_n1to0.append(i)
else:
if len(conn_lists[i]) == 0: continue
state2_list = []
state1_list = []
for j in conn_lists[i]:
if robot_states[j] == 2:
state2_list.append(j)
elif robot_states[j] == 1:
state1_list.append(j)
if len(state2_list) + len(state1_list) != 0:
groups_local, group_id_max = S14_robot_grouping(
state2_list + state1_list, robot_group_ids, groups)
if len(groups_local.keys()) > 1:
# disassemble all groups except the largest one
for group_id_temp in groups_local.keys():
if ((group_id_temp != group_id_max) and
(group_id_temp not in st_gton1)):
# schedule to disassemble this group
st_gton1.append(group_id_temp)
else:
# state '-1' robot can only be repelled away by state '2' robot
if len(state2_list) != 0:
# find the closest neighbor in groups_local[group_id_max]
robot_closest = S14_closest_robot(i, state2_list)
# change moving direction opposing the closest robot
vect_temp = robot_poses[i] - robot_poses[robot_closest]
robot_oris[i] = math.atan2(vect_temp[1], vect_temp[0])
elif robot_states[i] == 0: # for host robot with state '0'
if len(conn_lists[i]) == 0: continue
state2_list = []
state1_list = []
state0_list = []
for j in conn_lists[i]:
if robot_states[j] == 2:
state2_list.append(j)
elif robot_states[j] == 1:
state1_list.append(j)
elif robot_states[j] == 0:
state0_list.append(j)
state2_quantity = len(state2_list)
state1_quantity = len(state1_list)
state0_quantity = len(state0_list)
# disassemble minority groups if there are multiple groups
if state2_quantity + state1_quantity > 1:
# there is in-group robot in the neighbors
groups_local, group_id_max = S14_robot_grouping(state2_list+state1_list,
robot_group_ids, groups)
                    # disassemble all groups except the largest one
for group_id_temp in groups_local.keys():
if (group_id_temp != group_id_max) and (group_id_temp not in st_gton1):
st_gton1.append(group_id_temp) # schedule to disassemble this group
# responses to the state '2', '1', and '0' robots
if state2_quantity != 0:
# join the group with state '2' robots
if state2_quantity == 1: # only one state '2' robot
# join the group of the state '2' robot
st_0to1[i] = robot_group_ids[state2_list[0]]
robot_key_neighbors[i] = [state2_list[0]] # add key neighbor
else: # multiple state '2' robots
# it's possible that the state '2' robots are in different groups
# find the closest one in the largest group, and join the group
groups_local, group_id_max = S14_robot_grouping(state2_list,
robot_group_ids, groups)
robot_closest = S14_closest_robot(i, groups_local[group_id_max])
st_0to1[i] = group_id_max
robot_key_neighbors[i] = [robot_closest] # add key neighbor
# elif state1_quantity != 0:
# # get repelled away from state '1' robot
# if state1_quantity == 1: # only one state '1' robot
# vect_temp = robot_poses[i] - robot_poses[state1_list[0]]
# robot_oris[i] = math.atan2(vect_temp[1], vect_temp[0])
# else:
# groups_local, group_id_max = S14_robot_grouping(state1_list,
# robot_group_ids, groups)
# robot_closest = S14_closest_robot(i, groups_local[group_id_max])
# vect_temp = robot_poses[i] - robot_poses[robot_closest]
# robot_oris[i] = math.atan2(vect_temp[1], vect_temp[0])
elif state0_quantity != 0:
# form new group with state '0' robots
st_0to2[i] = S14_closest_robot(i, state0_list)
elif (robot_states[i] == 1) or (robot_states[i] == 2):
# disassemble the minority groups
state12_list = [] # list of state '1' and '2' robots in the list
has_other_group = False
host_group_id = robot_group_ids[i]
for j in conn_lists[i]:
if (robot_states[j] == 1) or (robot_states[j] == 2):
state12_list.append(j)
if robot_group_ids[j] != host_group_id:
has_other_group = True
if has_other_group:
groups_local, group_id_max = S14_robot_grouping(state12_list,
robot_group_ids, groups)
for group_id_temp in groups_local.keys():
if (group_id_temp != group_id_max) and (group_id_temp not in st_gton1):
st_gton1.append(group_id_temp) # schedule to disassemble this group
# check if state '1' robot's order on loop is good
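                # (roles are indices along the loop; robot i stops climbing and joins
                # when its role falls inside the cyclic interval between the roles of
                # current_key and next_key -- the two branches below differ only in
                # whether that interval wraps around the end of the role sequence)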
if robot_states[i] == 1:
current_key = robot_key_neighbors[i][0]
next_key = robot_key_neighbors[current_key][-1]
if next_key in conn_lists[i]: # next key neighbor is detected
role_i = assignment_scheme[i]
role_current_key = assignment_scheme[current_key]
role_next_key = assignment_scheme[next_key]
if len(robot_key_neighbors[current_key]) == 1:
# the situation that only two members are in the group
vect_temp = robot_poses[next_key] - robot_poses[current_key]
# deciding which side of vect_temp robot i is at
vect_i = robot_poses[i] - robot_poses[current_key]
side_value = np.cross(vect_i, vect_temp)
if side_value > 0: # robot i on the right side
if role_next_key > role_current_key:
if (role_i < role_current_key) or (role_i > role_next_key):
# update with next key neighbor
robot_key_neighbors[i] = [next_key]
else:
# its position on loop is achieved, becoming state '2'
if current_key in st_1to2.keys():
st_1to2[current_key].append(i)
else:
st_1to2[current_key] = [i]
else:
if (role_i < role_current_key) and (role_i > role_next_key):
# update with next key neighbor
robot_key_neighbors[i] = [next_key]
else:
# its position on loop is achieved, becoming state '2'
if current_key in st_1to2.keys():
st_1to2[current_key].append(i)
else:
st_1to2[current_key] = [i]
else: # robot i on the left side
pass
else:
# the situation that at least three members are in the group
if role_next_key > role_current_key:
# the roles of the two robots are on the same side
if (role_i < role_current_key) or (role_i > role_next_key):
# update with next key neighbor
robot_key_neighbors[i] = [next_key]
else:
# its position on loop is achieved, becoming state '2'
if current_key in st_1to2.keys():
st_1to2[current_key].append(i)
else:
st_1to2[current_key] = [i]
else:
# the roles of the two robots are on different side
if (role_i < role_current_key) and (role_i > role_next_key):
# update with next key neighbor
robot_key_neighbors[i] = [next_key]
else:
# its position on loop is achieved, becoming state '2'
if current_key in st_1to2.keys():
st_1to2[current_key].append(i)
else:
st_1to2[current_key] = [i]
# check the life time of the groups; schedule disassembling if expired
for group_id_temp in groups.keys():
if groups[group_id_temp][1] < 0: # life time of a group ends
if group_id_temp not in st_gton1:
st_gton1.append(group_id_temp)
# process the state transition tasks
# 1.st_1to2, robot '1' locates its order on loop, becoming '2'
for left_key in st_1to2.keys():
right_key = robot_key_neighbors[left_key][-1]
role_left_key = assignment_scheme[left_key]
role_right_key = assignment_scheme[right_key]
joiner = -1 # the accepted joiner in this position
if len(st_1to2[left_key]) == 1:
joiner = st_1to2[left_key][0]
else:
joiner_list = st_1to2[left_key]
role_dist_min = swarm_size
for joiner_temp in joiner_list:
# find the joiner with closest role to left key neighbor
role_joiner_temp = assignment_scheme[joiner_temp]
role_dist_temp = role_joiner_temp - role_left_key
if role_dist_temp < 0:
role_dist_temp = role_dist_temp + swarm_size
if role_dist_temp < role_dist_min:
joiner = joiner_temp
role_dist_min = role_dist_temp
# put in the new joiner
# update the robot properties
group_id_temp = robot_group_ids[left_key]
robot_states[joiner] = 2
robot_group_ids[joiner] = group_id_temp
robot_key_neighbors[joiner] = [left_key, right_key]
if len(robot_key_neighbors[left_key]) == 1:
robot_key_neighbors[left_key] = [right_key, joiner]
robot_key_neighbors[right_key] = [joiner, left_key]
else:
robot_key_neighbors[left_key][1] = joiner
robot_key_neighbors[right_key][0] = joiner
# no need to update the group properties
# 2.st_0to1, robot '0' joins a group, becoming '1'
for joiner in st_0to1.keys():
group_id_temp = st_0to1[joiner]
# update the robot properties
robot_states[joiner] = 1
robot_group_ids[joiner] = group_id_temp
# update the group properties
groups[group_id_temp][0].append(joiner)
groups[group_id_temp][1] = groups[group_id_temp][1] + life_incre
# 3.st_0to2, robot '0' forms new group with '0', both becoming '2'
while len(st_0to2.keys()) != 0:
pair0 = st_0to2.keys()[0]
pair1 = st_0to2[pair0]
st_0to2.pop(pair0)
if (pair1 in st_0to2.keys()) and (st_0to2[pair1] == pair0):
st_0to2.pop(pair1)
# only forming a group if there is at least one seed robot in the pair
if robot_seeds[pair0] or robot_seeds[pair1]:
# forming new group for robot pair0 and pair1
group_id_temp = np.random.randint(0, group_id_upper)
while group_id_temp in groups.keys():
group_id_temp = np.random.randint(0, group_id_upper)
# update properties of the robots
robot_states[pair0] = 2
robot_states[pair1] = 2
robot_group_ids[pair0] = group_id_temp
robot_group_ids[pair1] = group_id_temp
robot_key_neighbors[pair0] = [pair1]
robot_key_neighbors[pair1] = [pair0]
# update properties of the group
groups[group_id_temp] = [[pair0, pair1], life_incre*2, False]
# 4.st_gton1, groups get disassembled, life time ends or triggered by others
for group_id_temp in st_gton1:
for robot_temp in groups[group_id_temp][0]:
robot_states[robot_temp] = -1
robot_n1_lives[robot_temp] = np.random.uniform(n1_life_lower, n1_life_upper)
robot_group_ids[robot_temp] = -1
robot_oris[robot_temp] = np.random.rand() * 2 * math.pi - math.pi
robot_key_neighbors[robot_temp] = []
groups.pop(group_id_temp)
# 5.st_n1to0, life time of robot '-1' ends, get back to '0'
for robot_temp in st_n1to0:
robot_states[robot_temp] = 0
# check if a group becomes dominant
for group_id_temp in groups.keys():
if len(groups[group_id_temp][0]) > swarm_size/2.0:
groups[group_id_temp][2] = True
else:
groups[group_id_temp][2] = False
# update the physics
robot_poses_t = np.copy(robot_poses) # as old poses
no_state1_robot = True
for i in range(swarm_size):
# adjusting moving direction for state '1' and '2' robots
if robot_states[i] == 1:
no_state1_robot = False
# rotating around its only key neighbor
center = robot_key_neighbors[i][0] # the center robot
# use the triangle of (desired_space, dist_table[i,center], step_moving_dist)
if dist_table[i,center] > (desired_space + step_moving_dist):
# moving toward the center robot
robot_oris[i] = math.atan2(robot_poses_t[center,1] - robot_poses_t[i,1],
robot_poses_t[center,0] - robot_poses_t[i,0])
elif (dist_table[i,center] + step_moving_dist) < desired_space:
# moving away from the center robot
robot_oris[i] = math.atan2(robot_poses_t[i,1] - robot_poses_t[center,1],
robot_poses_t[i,0] - robot_poses_t[center,0])
else:
# moving tangent along the circle of radius of "desired_space"
robot_oris[i] = math.atan2(robot_poses_t[i,1] - robot_poses_t[center,1],
robot_poses_t[i,0] - robot_poses_t[center,0])
                    # interior angle between dist_table[i,center] and step_moving_dist
                    # (np.clip keeps the cosine inside acos's domain; plain integer
                    # rounding would collapse it to -1, 0, or 1)
                    int_angle_temp = math.acos(np.clip((math.pow(dist_table[i,center],2) +
                        math.pow(step_moving_dist,2) - math.pow(desired_space,2)) /
                        (2.0*dist_table[i,center]*step_moving_dist), -1.0, 1.0))
robot_oris[i] = reset_radian(robot_oris[i] + (math.pi - int_angle_temp))
elif robot_states[i] == 2:
# adjusting position to maintain the loop
if len(robot_key_neighbors[i]) == 1:
# situation that only two robots are in the group
j = robot_key_neighbors[i][0]
if abs(dist_table[i,j] - desired_space) < destination_error:
continue # stay in position if within destination error
else:
if dist_table[i,j] > desired_space:
robot_oris[i] = math.atan2(robot_poses_t[j,1] - robot_poses_t[i,1],
robot_poses_t[j,0] - robot_poses_t[i,0])
else:
robot_oris[i] = math.atan2(robot_poses_t[i,1] - robot_poses_t[j,1],
robot_poses_t[i,0] - robot_poses_t[j,0])
else:
# normal situation with at least three members in the group
group_id_temp = robot_group_ids[i]
state2_quantity = 0 # number of state '2' robots
for robot_temp in groups[group_id_temp][0]:
if robot_states[robot_temp] == 2:
state2_quantity = state2_quantity + 1
desired_angle = math.pi - 2*math.pi / state2_quantity
# use the SMA algorithm to achieve the desired interior angle
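                    # (a spring model: two linear springs pull robot i toward desired_space
                    # along the directions of its key neighbors, and a bending spring pushes
                    # along vect_in, the unit normal of the left-right chord, until the
                    # interior angle reaches that of a regular polygon, pi - 2*pi/N)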
left_key = robot_key_neighbors[i][0]
right_key = robot_key_neighbors[i][1]
vect_l = (robot_poses_t[left_key] - robot_poses_t[i]) / dist_table[i,left_key]
vect_r = (robot_poses_t[right_key] - robot_poses_t[i]) / dist_table[i,right_key]
vect_lr = robot_poses_t[right_key] - robot_poses_t[left_key]
vect_lr_dist = np.linalg.norm(vect_lr)
vect_in = np.array([-vect_lr[1], vect_lr[0]]) / vect_lr_dist
                    inter_curr = math.acos(np.clip(np.dot(vect_l, vect_r), -1.0, 1.0)) # interior angle, clipped into acos's domain
if np.cross(vect_r, vect_l) < 0:
inter_curr = 2*math.pi - inter_curr
fb_vect = np.zeros(2) # feedback vector to accumulate spring effects
fb_vect = fb_vect + ((dist_table[i,left_key] - desired_space) *
linear_const * vect_l)
fb_vect = fb_vect + ((dist_table[i,right_key] - desired_space) *
linear_const * vect_r)
fb_vect = fb_vect + ((desired_angle - inter_curr) *
bend_const * vect_in)
                    if (np.linalg.norm(fb_vect)