__author__ = 'noe'
import keras
import numpy as np
def connect(input_layer, layers):
""" Connect the given sequence of layers and returns output layer
Parameters
----------
input_layer : keras layer
Input layer
layers : list of keras layers
Layers to be connected sequentially
Returns
-------
output_layer : keras layer
Output Layer
"""
layer = input_layer
for l in layers:
layer = l(layer)
return layer
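# Illustrative usage sketch (not part of the original module): wiring a small
# dense stack with connect(). Layer sizes and activations are assumptions
# chosen only to show the calling pattern.
#     inp = keras.layers.Input(shape=(10,))
#     out = connect(inp, [keras.layers.Dense(32, activation='relu'), keras.layers.Dense(1)])
#     model = keras.models.Model(inputs=inp, outputs=out)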
def plot_network(network):
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
return SVG(model_to_dot(network).create(prog='dot', format='svg'))
def layer_to_dict(layer):
d = {'config' : keras.layers.serialize(layer),
'input_shape' : layer.input_shape,
'weights' : layer.get_weights()}
return d
def layer_from_dict(d):
layer = keras.layers.deserialize(d['config'])
layer.build(d['input_shape'])
layer.set_weights(d['weights'])
return layer
def serialize_layers(list_of_layers):
""" Returns a serialized version of the list of layers (recursive)
Parameters
----------
list_of_layers : list or layer
(possibly nested) lists of keras layers, or a single keras layer
"""
if isinstance(list_of_layers, keras.layers.Layer):
return layer_to_dict(list_of_layers)
return [serialize_layers(l) for l in list_of_layers]
def deserialize_layers(S):
""" Returns lists of lists of layers from a given serialization
Parameters
----------
S : list of list of dict (recursive)
dictionary obtained with serialize_layers
"""
if isinstance(S, dict):
return layer_from_dict(S)
return [deserialize_layers(l) for l in S]
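# Illustrative round-trip sketch (not part of the original module). Layers must
# already be connected/built so that their input_shape and weights are defined:
#     layers = [keras.layers.Dense(8, activation='tanh'), keras.layers.Dense(1)]
#     out = connect(keras.layers.Input(shape=(4,)), layers)
#     saved = serialize_layers(layers)
#     restored = deserialize_layers(saved)
# The nesting of the returned structure mirrors the nesting of the input, with
# each keras layer replaced by (or rebuilt from) its config, input_shape and weights.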
def shuffle(x):
""" Shuffles the rows of matrix data x.
Returns
-------
x_shuffled : array
Shuffled data
"""
# sorting a vector of uniform random keys gives a uniformly random row permutation
Ishuffle = np.argsort(np.random.rand(x.shape[0]))
return x[Ishuffle]
import hashlib
import logging
import os
import threading
import time
from tkinter import *
import cv2
import numpy as np
from PIL import Image, ImageTk
from src.ImageProcessing.contouring import cnt_from_img, save_contour, save_image
class ResizingImageCanvas(Canvas):
"""
Customized Canvas that handles dynamic image resizing and displays slice contours.
"""
def __init__(self, parent=None, image=None, dicom_manager=None, **kwargs):
"""
Initializer
:param parent: The parent to this tk Element
:param image: The image to load
:param dicom_manager: The DicomManager instance to assign to this class
:param kwargs: Keyword arguments to pass to parent
"""
Canvas.__init__(self, **kwargs)
self.parent = parent
self.dm = dicom_manager
self.logger = logging.getLogger(__name__)
# Configure key and mouse bindings
self.bind("<Key>", self.keydispatch)
self.bind("<Configure>", self.on_resize)
self.bind("<Button-1>", self.create_point)
self.bind("<Button-3>", self.plot_points)
self.bind("<Button-2>", self.toggle_smoothing)
self.configure(cursor="crosshair red")
self.configure()
# Configure window size
self.height = self.winfo_reqheight()
self.width = self.winfo_reqwidth()
# Configure contour parameters
self.user_points = []
self.spline = 0
self.new_point = False
self.user_line_tag = "usr_line"
self.user_point_tag = "usr_point"
self.contour_line_tag = "cnt_line"
self.contour_point_tag = "cnt_point"
self.contours = None
self.curr_contour = 0
self.cnt_points = []
self.contour_image = None
self.contour_photo = None
# Configure image parameters
self.image_path = ""
self.image_folder = ""
self.image_names = []
self.image_idx = 0
self.image = None
self.photo = None
# Configure ROI parameters
self.roi = None
self.roi_set = False
self.cnt_img = None
self.thresh_val = 70
self.ready = False
self.set_image(image)
def set_dm(self, dm):
if dm is not None:
self.logger.info("Got new DICOMManager")
self.dm = dm
def keydispatch(self, event):
"""
Receives key events and chooses the appropriate action.
:param event: The key event to process
"""
self.logger.debug("User pressed: '{}'".format(event.keysym))
if event.keysym == "Right":
self.update_contour_idx(1)
if event.keysym == "Left":
self.update_contour_idx(-1)
if event.keysym == "Down":
self.export_contour(self.curr_contour)
if event.keysym == "a":
self.logger.info("Current image: {}".format(self.image_idx))
self.update_image_idx(-1)
if event.keysym == "d":
self.logger.info("Current image: {}".format(self.image_idx))
self.update_image_idx(1)
if event.keysym == "x":
self.clear_points()
if event.keysym == "c":
self.apply_corrections()
if event.keysym == "equal" or event.keysym == "plus":
self.update_thresh(1)
if event.keysym == "minus":
self.update_thresh(-1)
if event.keysym == "r":
self.activate_roi()
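# Summary of the key bindings dispatched above:
#   Right / Left : next / previous contour
#   Down         : export the current contour
#   a / d        : previous / next image
#   x            : clear user-placed points
#   c            : apply corrections
#   = or + / -   : raise / lower the threshold value
#   r            : activate the selected region of interest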
def activate_roi(self):
"""
Activates the region of interest that the user selected.
"""
img_arr = self.dm.get_image_array(self.image_idx)
self.roi = cv2.selectROI(
cv2.cvtColor(np.asarray(img_arr, np.uint8), cv2.COLOR_GRAY2BGR))  # NOTE: color-conversion flag is an assumption; the original call was truncated here
import numpy as np
from math import *
import sys
from calc_dmod import calc_lumd
#from calc_kcor import calc_kcor
'''
# get_colors
#
# Takes a list of lines from an SN data file and parses the SN parameters and host colors
# Returns two arrays, one containing arrays of SN peak mag, SALT s, SALT2 x0, x1, and c parameters, and the
# separation of the SN from the host nucleus, and the other containing array pairs of host colors and errors,
# so that plotting can be done easily.
'''
def get_colors(line_list):
mag=[]
mag_err=[]
s=[]
s_err=[]
c=[]
c_err=[]
x0=[]
x0_err=[]
x1=[]
x1_err=[]
sep=[]
u_mag=[]
u_err=[]
g_mag=[]
g_err=[]
r_mag=[]
r_err=[]
i_mag=[]
i_err=[]
z_mag=[]
z_err=[]
for line1 in line_list:
if line1[0]=='#': continue
line=line1.split(',')
if len(line)<2: continue #This is to prevent an error if the line is too short
if line[42]=='0.0': continue #Make sure there is an r-band R_e
redshift=float(line[4])
lumd=calc_lumd(redshift)
dmod=5*np.log10(lumd*10**6)-5 # distance modulus: m - M = 5*log10(d_L/pc) - 5, with lumd in Mpc
mag.append(float(line[5])-dmod)
if line[6]=='': mag_err.append(0)
else: mag_err.append(float(line[6]))
c.append(float(line[11]))
c_err.append(float(line[12]))
s.append(float(line[13]))
s_err.append(float(line[14]))
sep.append(np.log10(float(line[15])/float(line[42])))
if line[7]=='' or line[9]=='':
x0.append(-99)
x0_err.append(-99)
x1.append(-99)
x1_err.append(-99)
else:
x0.append(float(line[7]))
x0_err.append(float(line[8]))
x1.append(float(line[9]))
x1_err.append(float(line[10]))
u_mag.append(float(line[18]))
u_err.append(float(line[19]))
g_mag.append(float(line[20]))
g_err.append(float(line[21]))
r_mag.append(float(line[22]))
r_err.append(float(line[23]))
i_mag.append(float(line[24]))
i_err.append(float(line[25]))
z_mag.append(float(line[26]))
z_err.append(float(line[27]))
# Convert lists to arrays for manipulation
mag=np.array(mag)
mag_err=np.array(mag_err)
s=np.array(s)
s_err=np.array(s_err)
c=np.array(c)
c_err=np.array(c_err)
x0=np.array(x0)
x0_err=np.array(x0_err)
x1=np.array(x1)
x1_err=np.array(x1_err)
sep=np.array(sep)
u_mag=np.array(u_mag)
u_err=np.array(u_err)
g_mag=np.array(g_mag)
g_err=np.array(g_err)
r_mag=np.array(r_mag)
r_err=np.array(r_err)
i_mag=np.array(i_mag)
i_err=np.array(i_err)
z_mag=np.array(z_mag)
z_err=np.array(z_err)
ug=u_mag-g_mag
ug_err=np.sqrt(u_err**2+g_err**2)
ur=u_mag-r_mag
ur_err=np.sqrt(u_err**2+r_err**2)
ui=u_mag-i_mag
ui_err=np.sqrt(u_err**2+i_err**2)
uz=u_mag-z_mag
uz_err=np.sqrt(u_err**2+z_err**2)
gr=g_mag-r_mag
gr_err=np.sqrt(g_err**2+r_err**2)
gi=g_mag-i_mag
gi_err=np.sqrt(g_err**2+i_err**2)
gz=g_mag-z_mag
gz_err=np.sqrt(g_err**2+z_err**2)
ri=r_mag-i_mag
ri_err=np.sqrt(r_err**2+i_err**2)
rz=r_mag-z_mag
rz_err=np.sqrt(r_err**2+z_err**2)
iz=i_mag-z_mag
iz_err=np.sqrt(i_err**2+z_err**2)
sn_array=np.array([np.array([mag,mag_err]),np.array([s,s_err]),np.array([c,c_err]),np.array([x0,x0_err]),np.array([x1,x1_err]),sep])
color_array=np.array([np.array([ug,ug_err]),np.array([ur,ur_err]),np.array([ui,ui_err]),np.array([uz,uz_err]),np.array([gr,gr_err]),np.array([gi,gi_err]),np.array([gz,gz_err]),np.array([ri,ri_err]),np.array([rz,rz_err]),np.array([iz,iz_err])])
return sn_array,color_array
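# Illustrative sketch (not in the original file) of how the returned arrays
# might be unpacked for plotting; the file name and plotting calls are assumptions:
#     sn_array, color_array = get_colors(open('sn_data.csv').readlines())
#     abs_mag, abs_mag_err = sn_array[0]
#     gr, gr_err = color_array[4]
#     plt.errorbar(gr, abs_mag, xerr=gr_err, yerr=abs_mag_err, fmt='o')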
import numpy as np
from random import random
from numba import njit
import random as rand
import matplotlib.pyplot as plt
class RotSurCode():
nbr_eq_classes = 4
def __init__(self, size):
self.system_size = size
self.qubit_matrix = np.zeros((self.system_size, self.system_size), dtype=np.uint8)
self.plaquette_defects = np.zeros((size + 1, size + 1))
def generate_random_error(self, p_x, p_y, p_z):
size = self.system_size
for i in range(size):
for j in range(size):
q = 0
r = rand.random()
if r < p_z:
q = 3
if p_z < r < (p_z + p_x):
q = 1
if (p_z + p_x) < r < (p_z + p_x + p_y):
q = 2
self.qubit_matrix[i, j] = q
self.syndrome()
def generate_zbiased_error(self, p_error, eta): # Z-biased noise
p = p_error
p_z = p * eta / (eta + 1)
p_x = p / (2 * (eta + 1))
p_y = p_x
size = self.system_size
for i in range(size):
for j in range(size):
q = 0
r = rand.random()
if r < p_z:
q = 3
elif p_z < r < (p_z + p_x):
q = 1
elif (p_z + p_x) < r < (p_z + p_x + p_y):
q = 2
self.qubit_matrix[i, j] = q
self.syndrome()
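# Note on the parameterization above: p_x + p_y + p_z = p_error and the bias is
# eta = p_z / (p_x + p_y). For example, p_error = 0.1 with eta = 10 gives
# p_z = 0.1*10/11 ~ 0.0909 and p_x = p_y = 0.1/22 ~ 0.0045.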
# def generate_random_error(self, p_error, eta): # Y-biased noise
# eta = eta
# p = p_error
# p_y = p * eta / (eta + 1)
# p_x = p / (2 * (eta + 1))
# p_z = p_x
# size = self.system_size
# for i in range(size):
# for j in range(size):
# q = 0
# r = rand.random()
# if r < p_y:
# q = 2
# elif p_y < r < (p_y + p_x):
# q = 1
# elif (p_y + p_x) < r < (p_y + p_x + p_z):
# q = 3
# self.qubit_matrix[i, j] = q
def chain_lengths(self):
nx = np.count_nonzero(self.qubit_matrix[:, :] == 1)
ny = np.count_nonzero(self.qubit_matrix[:, :] == 2)
nz = np.count_nonzero(self.qubit_matrix[:, :] == 3)
return nx, ny, nz
def count_errors(self):
return _count_errors(self.qubit_matrix)
def apply_logical(self, operator: int, X_pos=0, Z_pos=0):
return _apply_logical(self.qubit_matrix, operator, X_pos, Z_pos)
def apply_stabilizer(self, row: int, col: int, operator: int):
return _apply_stabilizer(self.qubit_matrix, row, col, operator)
def apply_random_logical(self):
return _apply_random_logical(self.qubit_matrix)
def apply_random_stabilizer(self):
return _apply_random_stabilizer(self.qubit_matrix)
def apply_stabilizers_uniform(self, p=0.5):
return _apply_stabilizers_uniform(self.qubit_matrix, p)
def define_equivalence_class(self):
return _define_equivalence_class(self.qubit_matrix)
def to_class(self, eq):
eq_class = self.define_equivalence_class()
op = eq_class ^ eq
return self.apply_logical(op)[0]
def syndrome(self):
size = self.qubit_matrix.shape[1]
qubit_matrix = self.qubit_matrix
for i in range(size-1):
for j in range(size-1):
self.plaquette_defects[i+1, j+1] = _find_syndrome(qubit_matrix, i, j, 1)
for i in range(int((size - 1)/2)):
for j in range(4):
row = 0
col = 0
if j == 0:
row = 0
col = 2 * i + 2
elif j == 1:
row = 2 * i + 2
col = size
elif j == 2:
row = size
col = 2 * i + 1
elif j == 3:
row = 2 * i + 1
col = 0
self.plaquette_defects[row, col] = _find_syndrome(qubit_matrix, i, j, 3)
def plot(self, title):
system_size = self.system_size
xLine = np.linspace(0, system_size - 1, system_size)
a = range(system_size)
X, Y = np.meshgrid(a, a)
XLine, YLine = np.meshgrid(a, xLine)
plaquette_defect_coordinates = np.where(self.plaquette_defects)
x_error = np.where(self.qubit_matrix[:, :] == 1)
y_error = np.where(self.qubit_matrix[:, :] == 2)
z_error = np.where(self.qubit_matrix[:, :] == 3)
def generate_semicircle(center_x, center_y, radius, stepsize=0.1):
x = np.arange(center_x, center_x + radius + stepsize, stepsize)
y = np.sqrt(radius ** 2 - x ** 2)
x = np.concatenate([x, x[::-1]])
y = np.concatenate([y, -y[::-1]])
return x, y + center_y
markersize_qubit = 15
markersize_excitation = 7
markersize_symbols = 7
linewidth = 2
# Plot grid lines
ax = plt.subplot(111)
x, y = generate_semicircle(0, 1, 0.5, 0.01)
for i in range(int((system_size - 1) / 2)):
ax.plot(y + 0.5 + i * 2, x + system_size - 1, color='black', linewidth=linewidth)
ax.plot(-y + 1.5 + 2 * i, -x, color='black', linewidth=linewidth)
ax.plot(x + system_size - 1, y - 0.5 + i * 2, color='black', linewidth=linewidth)
ax.plot(-x, -y + 0.5 + system_size - 1 - 2 * i, color='black', linewidth=linewidth)
ax.plot(XLine, YLine, 'black', linewidth=linewidth)
ax.plot(YLine, XLine, 'black', linewidth=linewidth)
ax.plot(X, Y, 'o', color='black', markerfacecolor='white', markersize=markersize_qubit + 1)
ax.plot(x_error[1], system_size - 1 - x_error[0], 'o', color='blue', markersize=markersize_symbols, marker=r'$X$')
ax.plot(y_error[1], system_size - 1 - y_error[0], 'o', color='blue', markersize=markersize_symbols, marker=r'$Y$')
ax.plot(z_error[1], system_size - 1 - z_error[0], 'o', color='blue', markersize=markersize_symbols, marker=r'$Z$')
for i in range(len(plaquette_defect_coordinates[1])):
if plaquette_defect_coordinates[1][i] == 0:
ax.plot(plaquette_defect_coordinates[1][i] - 0.5 + 0.25, system_size - plaquette_defect_coordinates[0][i] - 0.5, 'o', color='red', label="flux", markersize=markersize_excitation)
elif plaquette_defect_coordinates[0][i] == 0:
ax.plot(plaquette_defect_coordinates[1][i] - 0.5, system_size - plaquette_defect_coordinates[0][i] - 0.5 - 0.25, 'o', color='red', label="flux", markersize=markersize_excitation)
elif plaquette_defect_coordinates[1][i] == system_size:
ax.plot(plaquette_defect_coordinates[1][i] - 0.5 - 0.25, system_size - plaquette_defect_coordinates[0][i] - 0.5, 'o', color='red', label="flux", markersize=markersize_excitation)
elif plaquette_defect_coordinates[0][i] == system_size:
ax.plot(plaquette_defect_coordinates[1][i] - 0.5, system_size - plaquette_defect_coordinates[0][i] - 0.5 + 0.25, 'o', color='red', label="flux", markersize=markersize_excitation)
else:
ax.plot(plaquette_defect_coordinates[1][i] - 0.5, system_size - plaquette_defect_coordinates[0][i] - 0.5, 'o', color='red', label="flux", markersize=markersize_excitation)
# ax.plot(plaquette_defect_coordinates[1] - 0.5, system_size - plaquette_defect_coordinates[0] - 0.5, 'o', color='red', label="flux", markersize=markersize_excitation)
ax.axis('off')
plt.axis('equal')
#plt.show()
plt.savefig('plots/graph_'+str(title)+'.png')
# plt.close()
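# Illustrative usage sketch (not part of the original file); the size and error
# rates below are assumptions chosen only to show the calling pattern:
#     code = RotSurCode(5)
#     code.generate_zbiased_error(p_error=0.1, eta=10)
#     nx, ny, nz = code.chain_lengths()
#     code.plot('example')  # writes plots/graph_example.png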
@njit('(uint8[:,:],)')
def _count_errors(qubit_matrix):
return np.count_nonzero(qubit_matrix)
@njit('(uint8[:,:], int64, int64, int64)')
def _find_syndrome(qubit_matrix, row: int, col: int, operator: int):
def flip(a):
if a == 0:
return 1
elif a == 1:
return 0
size = qubit_matrix.shape[1]
result_qubit_matrix = np.copy(qubit_matrix)
defect = 0
op = 0
if operator == 1: # full
qarray = [[0 + row, 0 + col], [0 + row, 1 + col], [1 + row, 0 + col], [1 + row, 1 + col]]
if row % 2 == 0:
if col % 2 == 0:
op = 1
else:
op = 3
else:
if col % 2 == 0:
op = 3
else:
op = 1
elif operator == 3: # half
if col == 0:
op = 1
qarray = [[0, row*2 + 1], [0, row*2 + 2]]
elif col == 1:
op = 3
qarray = [[row*2 + 1, size - 1], [row*2 + 2, size - 1]]
elif col == 2:
op = 1
qarray = [[size - 1, row*2], [size - 1, row*2 + 1]]
elif col == 3:
op = 3
qarray = [[row*2, 0], [row*2 + 1, 0]]
for i in qarray:
old_qubit = result_qubit_matrix[i[0], i[1]]
if old_qubit != 0 and old_qubit != op:
defect = flip(defect)
return defect
@njit('(uint8[:,:], int64, int64, int64)')
def _apply_logical(qubit_matrix, operator: int, X_pos=0, Z_pos=0):
result_qubit_matrix = np.copy(qubit_matrix)
# List to store how errors redistribute when the logical operator is applied
n_eq = [0, 0, 0, 0]
if operator == 0:
return result_qubit_matrix, (0, 0, 0)
size = qubit_matrix.shape[0]
do_X = (operator == 1 or operator == 2)
do_Z = (operator == 3 or operator == 2)
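# Paulis are encoded as 0=I, 1=X, 2=Y, 3=Z, so applying a Pauli to a qubit is
# (up to phase) a bitwise XOR of the codes: 1 ^ old applies X below, 3 ^ old
# applies Z, and operator == 2 applies both, i.e. Y.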
if do_X:
for i in range(size):
old_qubit = result_qubit_matrix[i, X_pos]
new_qubit = 1 ^ old_qubit
result_qubit_matrix[i, X_pos] = new_qubit
n_eq[old_qubit] -= 1
n_eq[new_qubit] += 1
if do_Z:
for i in range(size):
old_qubit = result_qubit_matrix[Z_pos, i]
new_qubit = 3 ^ old_qubit
result_qubit_matrix[Z_pos, i] = new_qubit
n_eq[old_qubit] -= 1
n_eq[new_qubit] += 1
return result_qubit_matrix, (n_eq[1], n_eq[2], n_eq[3])
@njit('(uint8[:,:],)')
def _apply_random_logical(qubit_matrix):
size = qubit_matrix.shape[0]
op = int(random() * 4)
if op == 1 or op == 2:
X_pos = int(random() * size)
else:
X_pos = 0
if op == 3 or op == 2:
Z_pos = int(random() * size)
else:
Z_pos = 0
return _apply_logical(qubit_matrix, op, X_pos, Z_pos)
@njit('(uint8[:,:], int64, int64, int64)')
def _apply_stabilizer(qubit_matrix, row: int, col: int, operator: int):
size = qubit_matrix.shape[0]
result_qubit_matrix = np.copy(qubit_matrix)
# List to store how errors redistribute when the stabilizer is applied
n_eq = [0, 0, 0, 0]
op = 0
if operator == 1: # full
qarray = [[0 + row, 0 + col], [0 + row, 1 + col], [1 + row, 0 + col], [1 + row, 1 + col]]
if row % 2 == 0:
if col % 2 == 0:
op = 1
else:
op = 3
else:
if col % 2 == 0:
op = 3
else:
op = 1
elif operator == 3: # half
if col == 0:
op = 1
qarray = [[0, row*2 + 1], [0, row*2 + 2]]
elif col == 1:
op = 3
qarray = [[row*2 + 1, size - 1], [row*2 + 2, size - 1]]
elif col == 2:
op = 1
qarray = [[size - 1, row*2], [size - 1, row*2 + 1]]
elif col == 3:
op = 3
qarray = [[row*2, 0], [row*2 + 1, 0]]
for i in qarray:
old_qubit = result_qubit_matrix[i[0], i[1]]
new_qubit = op ^ old_qubit
result_qubit_matrix[i[0], i[1]] = new_qubit
n_eq[old_qubit] -= 1
n_eq[new_qubit] += 1
return result_qubit_matrix, (n_eq[1], n_eq[2], n_eq[3])
@njit('(uint8[:,:],)')
def _apply_random_stabilizer(qubit_matrix):
size = qubit_matrix.shape[0]
rows = int((size-1)*random())
cols = int((size-1)*random())
rows2 = int(((size - 1)/2) * random())
cols2 = int(4 * random())
# Fraction of the size**2 - 1 stabilizers that are boundary (half) plaquettes:
# there are (size-1)**2 full plaquettes and 2*(size-1) half plaquettes.
phalf = (size**2 - (size-1)**2 - 1)/(size**2-1)
if rand.random() > phalf:
# operator = 1 = full stabilizer
return _apply_stabilizer(qubit_matrix, rows, cols, 1)
else:
# operator = 3 = half stabilizer
return _apply_stabilizer(qubit_matrix, rows2, cols2, 3)
@njit('(uint8[:,:],)')
def _define_equivalence_class(qubit_matrix):
x_errors = np.count_nonzero(qubit_matrix[0, :] == 1)
x_errors += np.count_nonzero(qubit_matrix[0, :] == 2)
z_errors = np.count_nonzero(qubit_matrix[:, 0] == 3)
z_errors += np.count_nonzero(qubit_matrix[:, 0] == 2)
"""
Methods for estimating thresholded cluster maps from neuroimaging contrasts
(Contrasts) from sets of foci and optional additional information (e.g., sample
size and test statistic values).
NOTE: Currently imagining output from "dataset.get_coordinates" as a DataFrame
of peak coords and sample sizes/statistics (a la Neurosynth).
"""
from __future__ import division
import numpy as np
import nibabel as nib
from ...base import KernelEstimator
from .utils import compute_ma, get_ale_kernel
__all__ = ['ALEKernel', 'MKDAKernel', 'KDAKernel']
class ALEKernel(KernelEstimator):
"""
Generate ALE modeled activation images from coordinates and sample size.
"""
def __init__(self, coordinates, mask):
self.mask = mask
self.coordinates = coordinates
self.fwhm = None
self.n = None
def transform(self, ids, fwhm=None, n=None, masked=False):
"""
Generate ALE modeled activation images for each Contrast in dataset.
Parameters
----------
fwhm : :obj:`float`, optional
Full-width half-max for Gaussian kernel, if you want to have a
constant kernel across Contrasts. Mutually exclusive with ``n``.
n : :obj:`int`, optional
Sample size, used to derive FWHM for Gaussian kernel based on
formulae from Eickhoff et al. (2012). This sample size overwrites
the Contrast-specific sample sizes in the dataset, in order to hold
kernel constant across Contrasts. Mutually exclusive with ``fwhm``.
Returns
-------
imgs : :obj:`list` of `nibabel.Nifti1Image`
A list of modeled activation images (one for each of the Contrasts
in the input dataset).
"""
self.fwhm = fwhm
self.n = n
if fwhm is not None and n is not None:
raise ValueError('Only one of fwhm and n may be provided.')
if not masked:
mask_data = self.mask.get_data().astype(float)
else:
mask_data = self.mask.get_data().astype(np.bool)
imgs = []
kernels = {}
for id_ in ids:
data = self.coordinates.loc[self.coordinates['id'] == id_]
ijk = data[['i', 'j', 'k']].values.astype(int)
if n is not None:
n_subjects = n
elif fwhm is None:
n_subjects = data['n'].astype(float).values[0]
if fwhm is not None:
assert np.isfinite(fwhm), 'FWHM must be finite number'
if fwhm not in kernels.keys():
_, kern = get_ale_kernel(self.mask, fwhm=fwhm)
kernels[fwhm] = kern
else:
kern = kernels[fwhm]
else:
assert np.isfinite(n_subjects), 'Sample size must be finite number'
# cache kernels by the per-contrast sample size (n itself may be None here)
if n_subjects not in kernels.keys():
_, kern = get_ale_kernel(self.mask, n=n_subjects)
kernels[n_subjects] = kern
else:
kern = kernels[n_subjects]
kernel_data = compute_ma(self.mask.shape, ijk, kern)
if not masked:
kernel_data *= mask_data
img = nib.Nifti1Image(kernel_data, self.mask.affine)
else:
img = kernel_data[mask_data]
imgs.append(img)
if masked:
imgs = np.vstack(imgs)
return imgs
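# Illustrative usage sketch (not part of the original module). It assumes a
# coordinates DataFrame with columns 'id', 'i', 'j', 'k' and 'n', and a Nifti
# mask image, as described in the module docstring:
#     kernel = ALEKernel(coordinates=coords_df, mask=mask_img)
#     ma_maps = kernel.transform(ids=coords_df['id'].unique(), n=20)
# Passing a fixed n (or fwhm) holds the smoothing kernel constant across
# Contrasts; omitting both uses each Contrast's own sample size.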
class MKDAKernel(KernelEstimator):
"""
Generate MKDA modeled activation images from coordinates.
"""
def __init__(self, coordinates, mask):
self.mask = mask
self.coordinates = coordinates
self.r = None
self.value = None
def transform(self, ids, r=10, value=1, masked=False):
"""
Generate MKDA modeled activation images for each Contrast in dataset.
For each Contrast, a binary sphere of radius ``r`` is placed around
each coordinate. Voxels within overlapping regions between proximal
coordinates are set to 1, rather than the sum.
Parameters
----------
ids : :obj:`list`
A list of Contrast IDs for which to generate modeled activation
images.
r : :obj:`int`, optional
Sphere radius, in mm.
value : :obj:`int`, optional
Value for sphere.
Returns
-------
imgs : :obj:`list` of :obj:`nibabel.Nifti1Image`
A list of modeled activation images (one for each of the Contrasts
in the input dataset).
"""
self.r = r
self.value = value
r = float(r)
dims = self.mask.shape
vox_dims = self.mask.header.get_zooms()
if not masked:
mask_data = self.mask.get_data()
else:
mask_data = self.mask.get_data().astype(np.bool)
imgs = []
for id_ in ids:
data = self.coordinates.loc[self.coordinates['id'] == id_]
kernel_data = np.zeros(dims)
for ijk in data[['i', 'j', 'k']].values:
xx, yy, zz = [slice(-r // vox_dims[i], r // vox_dims[i] + 0.01, 1) for i in range(len(ijk))]
cube = np.vstack([row.ravel() for row in np.mgrid[xx, yy, zz]])
sphere = cube[:, np.sum(np.dot(np.diag(vox_dims), cube) ** 2, 0) ** .5 <= r]
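# The lines above build a cube of integer voxel offsets around the origin,
# convert those offsets to millimetres via the voxel dimensions, and keep only
# the offsets whose Euclidean distance from the center is at most r mm, i.e.
# the binary sphere described in the docstring.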
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
# Functions that yield mismatching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
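# For reference, _make_func_use_binop1('+') builds (via exec) a function
# equivalent to:
#     def test_impl(A, B):
#         return A + B
# and _make_func_use_method_arg1('add') the analogous A.add(B) version.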
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0  # TODO: use 2 to test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
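# A hedged sketch (not part of this suite) of the block partition a helper like
# get_start_end is assumed to perform: split n rows evenly across ranks so that
# each process passes only its S1[start:end] slice into the jitted function.
def _example_block_partition(n, rank, size):
    chunk = -(-n // size)              # ceiling division
    start = min(rank * chunk, n)
    end = min(start + chunk, n)
    return start, end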
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([True, False, False, True])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_int_no_index1(self):
'''Verifies Series.dropna() implementation for integer series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
n = 11
S1 = pd.Series(np.arange(n, dtype=np.int64))
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('numba.errors.TypingError - fix needed\n'
'Failed in hpat mode pipeline'
'(step: convert to distributed)\n'
'Invalid use of Function(<built-in function len>)'
'with argument(s) of type(s): (none)\n')
def test_series_rename1(self):
def test_impl(A):
return A.rename('B')
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A), test_impl(df.A))
def test_series_sum_default(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1., 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_sum_nan(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
# all NA case should produce 0
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Old style Series.sum() does not support parameters")
def test_series_sum_skipna_false(self):
def test_impl(S):
return S.sum(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(np.isnan(hpat_func(S)), np.isnan(test_impl(S)))
@unittest.skipIf(not hpat.config.config_pipeline_hpat_default,
"Series.sum() operator + is not implemented yet for Numba")
def test_series_sum2(self):
def test_impl(S):
return (S + S).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_prod(self):
def test_impl(S, skipna):
return S.prod(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
S = pd.Series(data)
for skipna_var in [True, False]:
actual = hpat_func(S, skipna=skipna_var)
expected = test_impl(S, skipna=skipna_var)
if np.isnan(actual) or np.isnan(expected):
# cannot compare NaN != NaN directly
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_prod_skipna_default(self):
def test_impl(S):
return S.prod()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2, 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_count1(self):
def test_impl(S):
return S.count()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(['aa', 'bb', np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_mean(self):
def test_impl(S):
return S.mean()
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
with self.subTest(data=data):
S = pd.Series(data)
actual = hpat_func(S)
expected = test_impl(S)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.mean() with parameters is not supported")
def test_series_mean_skipna(self):
def test_impl(S, skipna):
return S.mean(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for skipna in [True, False]:
for data in data_samples:
S = pd.Series(data)
actual = hpat_func(S, skipna)
expected = test_impl(S, skipna)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_var1(self):
def test_impl(S):
return S.var()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_min(self):
def test_impl(S):
return S.min()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.min() with parameters is not supported")
def test_series_min_param(self):
def test_impl(S, param_skipna):
return S.min(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_max(self):
def test_impl(S):
return S.max()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.max() with parameters is not supported")
def test_series_max_param(self):
def test_impl(S, param_skipna):
return S.max(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_value_counts(self):
def test_impl(S):
return S.value_counts()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['AA', 'BB', 'C', 'AA', 'C', 'AA'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_dist_input1(self):
'''Verify distribution of a Series without index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_dist_input2(self):
'''Verify distribution of a Series with integer index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), 1 + np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip("Passed if run single")
def test_series_dist_input3(self):
'''Verify distribution of a Series with string index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), ['abc{}'.format(id) for id in range(n)])
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_tuple_input1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
s_tup = (S, 1, S2)
self.assertEqual(hpat_func(s_tup), test_impl(s_tup))
@unittest.skip("pending handling of build_tuple in dist pass")
def test_series_tuple_input_dist1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(locals={'s_tup:input': 'distributed'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
start, end = get_start_end(n)
s_tup = (S, 1, S2)
h_s_tup = (S[start:end], 1, S2[start:end])
self.assertEqual(hpat_func(h_s_tup), test_impl(s_tup))
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_concat1(self):
def test_impl(S1, S2):
return pd.concat([S1, S2]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6., 7.])
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_map1(self):
def test_impl(S):
return S.map(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_global1(self):
def test_impl(S):
return S.map(lambda a: a + GLOBAL_VAL)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup1(self):
def test_impl(S):
return S.map(lambda a: (a, 2 * a))
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup_map1(self):
def test_impl(S):
A = S.map(lambda a: (a, 2 * a))
return A.map(lambda a: a[1])
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_combine(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_float3264(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([np.float64(1), np.float64(2),
np.float64(3), np.float64(4), np.float64(5)])
S2 = pd.Series([np.float32(1), np.float32(2),
np.float32(3), np.float32(4), np.float32(5)])
import numpy as np
from pyquaternion import Quaternion
def getOrientationQuaternion(a, b):
assert np.allclose(np.linalg.norm(b), 1.)
v = np.cross(a,b)
s = np.linalg.norm(v)
if not np.allclose(s, 0):
c = np.dot(a,b)
vskew = np.array([[0, -v[2], v[1]], [v[2], 0 , -v[0]], [-v[1], v[0], 0] ])
# rotation matrix rotating a into b
R = np.eye(3) + vskew + np.dot(vskew, vskew) * ((1-c)/(s**2))
else:
R = np.eye(3)
assert np.allclose(R.dot(a), b)
# pyquaternion returns elements ordered (w, x, y, z); reorder to (x, y, z, w)
qi = Quaternion(matrix=R).elements
return np.array([qi[1], qi[2], qi[3], qi[0]])
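# A hedged usage sketch of the helper above: align the x-axis with the y-axis and
# check that the returned (x, y, z, w) quaternion reproduces the rotation.
# (pyquaternion's constructor takes the scalar part first, hence the reordering.)
def _example_orientation():
    a = np.array([1.0, 0.0, 0.0])
    b = np.array([0.0, 1.0, 0.0])
    q_xyzw = getOrientationQuaternion(a, b)
    q = Quaternion(q_xyzw[3], q_xyzw[0], q_xyzw[1], q_xyzw[2])
    assert np.allclose(q.rotate(a), b)
    return q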
import numpy as np
from math import degrees
import math
def findAngle(a, b, c):
a = np.array(a)
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import given
from hypothesis.extra import numpy as hnp
from numpy.testing import assert_allclose
import mygrad as mg
from mygrad import Tensor
from mygrad.errors import InvalidGradient
from mygrad.operation_base import BinaryUfunc, Operation, UnaryUfunc
from tests.utils.errors import does_not_raise
class OldOperation(Operation):
"""Implements old version of MyGrad back-propagation"""
def __call__(self, a):
self.variables = (a,)
return a.data
def backward_var(self, grad, index, **kwargs):
self.variables[index].backward(grad)
def old_op(a):
return Tensor._op(OldOperation, a)
@given(
constant=st.booleans(),
arr=hnp.arrays(
dtype=np.float64,
shape=hnp.array_shapes(min_dims=0),
elements=st.floats(-1e6, 1e6),
)
| st.floats(-1e6, 1e6),
op_before=st.booleans(),
op_after=st.booleans(),
)
def test_backpropping_non_numeric_gradient_raises(
constant: bool, arr: np.ndarray, op_before: bool, op_after: bool
):
x = Tensor(arr, constant=constant)
if op_before:
x += 1
x = old_op(x)
if op_after:
x = x * 2
# if constant tensor, backprop should not be triggered - no exception raised
with (pytest.raises(InvalidGradient) if not constant else does_not_raise()):
x.backward()
def test_simple_unary_ufunc_with_where():
from mygrad.math.exp_log.ops import Exp
exp = Exp()
mask = np.array([True, False, True])
out = exp(mg.zeros((3,)), where=mask, out=np.full((3,), fill_value=2.876))
assert_allclose(out, [1.0, 2.876, 1.0])
def test_simple_binary_ufunc_with_where():
from mygrad.math.arithmetic.ops import Multiply
mul = Multiply()
mask = np.array([True, False, True])
out = mul(
mg.ones((3,)),
mg.full((3,), 2.0),
where=mask,
out=np.full((3,), fill_value=2.876),
)
assert_allclose(out, [2.0, 2.876, 2.0])
def test_simple_sequential_func_with_where():
from mygrad.math.sequential.ops import Sum
sum_ = Sum()
assert sum_(mg.ones((3,)), where=np.array([True, False, True])) == 2.0
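# A hedged plain-NumPy illustration of the masked-ufunc behaviour the tests above
# rely on: positions where `where` is False are left at whatever `out` already
# held, which is why 2.876 shows up in the expected results.
def _example_where_semantics():
    out = np.full((3,), 2.876)
    np.exp(np.zeros(3), where=np.array([True, False, True]), out=out)
    return out  # array([1.   , 2.876, 1.   ])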
import pytest
import sys
import os
from math import trunc, ceil, floor
import numpy as np
sys.path.insert(0, os.getcwd())
from uncvalue import Value, val, unc, set_unc # noqa: E402
ϵ = 1e-8
a = Value(3.1415, 0.0012)
b = Value(-1.618, 0.235)
c = Value(3.1264e2, 1.268)
A = np.array([[a, a], [b, b], [c, c]])
B = Value([a.x] * 5, a.ux)
C = Value([b.x] * 5, [b.ux] * 5)
@pytest.mark.parametrize('v, x', [
(a, a.x),
(A, np.array([[a.x, a.x], [b.x, b.x], [c.x, c.x]])),
(B, a.x),
(a.x, a.x)],
ids=['Single', 'Array of values', 'Value array', 'Number'])
def test_val(v, x):
assert np.all(val(v) == x)
@pytest.mark.parametrize('v, x', [
(a, a.ux),
(A, np.array([[a.ux, a.ux], [b.ux, b.ux], [c.ux, c.ux]])),
(B, a.ux),
(a.x, 0)],
ids=['Single', 'Array of values', 'Value array', 'Number'])
def test_unc(v, x):
assert np.all(unc(v) == x)
def test_set_unc():
v = set_unc(0.234, 0.0052)
assert isinstance(v, Value)
assert v.x == 0.234
assert v.ux == 0.0052
v = set_unc(a, 0.0052)
assert isinstance(v, Value)
assert v.x == a.x
assert v.ux == 0.0052
v = set_unc([0.234] * 8, 0.0052)
assert isinstance(v, np.ndarray)
assert v.shape == (8, )
assert np.mean(unc(v)) == 0.0052
v = set_unc([0.234] * 8, [0.0052] * 8)
assert isinstance(v, np.ndarray)
assert v.shape == (8, )
assert np.mean(unc(v)) == 0.0052
with pytest.raises(ValueError):
set_unc(np.random.random((3, 2, 1)), np.random.random((4, 2, 1)))
def test_constructor():
v = Value(3.1415, 0.0012)
assert v.x == 3.1415 == v.val
assert v.ux == 0.0012 == v.unc
with pytest.raises(ValueError):
Value(3.14, -0.28)
V = Value([3.1415] * 8, 0.0012)
assert V.x.shape == (8, )
assert V.ux.shape == (8, )
assert np.mean(V.ux) == 0.0012
V = Value([3.1415] * 8, [0.0012] * 8)
assert V.x.shape == (8, )
assert V.ux.shape == (8, )
assert np.mean(V.ux) == 0.0012
with pytest.raises(ValueError):
Value(np.random.random((3, 2, 1)), np.random.random((4, 2, 1)))
with pytest.raises(ValueError):
Value(1j, 0)
Value(1, 2j)
@pytest.mark.parametrize('x, y, r', [
(a.x, a, False),
(a, a.x, False),
(a, Value(a.x, a.ux * 5), False),
(b, a, True),
(a, a - 0.0001, False),
(A, A, False),
(B, C, False)],
ids=['Right', 'Left', 'Both', 'Different', 'Within unc', 'Array eq', 'Array dif'])
def test_smaller(x, y, r):
assert np.all((x < y) == r)
@pytest.mark.parametrize('x, y, r', [
(1, a, Value(a.x + 1, a.ux)),
(a, 1, Value(a.x + 1, a.ux)),
(a, b, Value(a.x + b.x, np.hypot(a.ux, b.ux))),
(1, A, np.array([[a+1, a+1], [b+1, b+1], [c+1, c+1]])),
(a, A, np.array([[a+a, a+a], [b+a, b+a], [c+a, c+a]])),
(1, B, Value(1 + B.x, B.ux)),
(a, B, Value(a.x + B.x, np.hypot(a.ux, B.ux))),
(A, A, np.array([[a+a, a+a], [b+b, b+b], [c+c, c+c]])),
(B, C, Value(B.x + C.x, np.hypot(B.ux, C.ux))),
],
ids=['Right', 'Left', 'Both', 'Number + Array', 'Value + Array', 'Array of values',
'Number + Valued array', 'Value + Valued array', 'Valued array'])
def test_add(x, y, r):
z = x + y
assert np.all(val(z) == val(r))
assert np.all(unc(z) == unc(r))
@pytest.mark.parametrize('x, y, r', [
(1, a, Value(a.x + 1, a.ux)),
(a.copy(), 1, Value(a.x + 1, a.ux)),
(a.copy(), b, Value(a.x + b.x, np.hypot(a.ux, b.ux))),
(B.copy(), C, Value(B.x + C.x, np.hypot(B.ux, C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_iadd(x, y, r):
x += y
assert isinstance(x, Value)
assert np.all(x.x == r.x)
assert np.all(x.ux == r.ux)
@pytest.mark.parametrize('x, y, r', [
(1, a, Value(1 - a.x, a.ux)),
(a, 1, Value(a.x - 1, a.ux)),
(a, b, Value(a.x - b.x, np.hypot(a.ux, b.ux))),
(A, A, np.array([[a-a, a-a], [b-b, b-b], [c-c, c-c]])),
(B, C, Value(B.x - C.x, np.hypot(B.ux, C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array of values', 'Valued array'])
def test_sub(x, y, r):
z = x - y
assert np.all(val(z) == val(r))
assert np.all(unc(z) == unc(r))
@pytest.mark.parametrize('x, y, r', [
(1, a, Value(1 - a.x, a.ux)),
(a.copy(), 1, Value(a.x - 1, a.ux)),
(a.copy(), b, Value(a.x - b.x, np.hypot(a.ux, b.ux))),
(B.copy(), C, Value(B.x - C.x, np.hypot(B.ux, C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_isub(x, y, r):
x -= y
assert isinstance(x, Value)
assert np.all(x.x == r.x)
assert np.all(x.ux == r.ux)
@pytest.mark.parametrize('x, y, r', [
(2, a, Value(2 * a.x, 2 * a.ux)),
(a, 2, Value(a.x * 2, 2 * a.ux)),
(a, b, Value(a.x * b.x, np.hypot(a.ux * b.x, a.x * b.ux))),
(A, A, np.array([[a*a, a*a], [b*b, b*b], [c*c, c*c]])),
(B, C, Value(B.x * C.x, np.hypot(B.ux * C.x, B.x * C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array of values', 'Valued array'])
def test_mul(x, y, r):
z = x * y
assert np.all(val(z) == val(r))
assert np.all(unc(z) == unc(r))
@pytest.mark.parametrize('x, y, r', [
(2, a, Value(2 * a.x, 2 * a.ux)),
(a.copy(), 2, Value(a.x * 2, 2 * a.ux)),
(a.copy(), b, Value(a.x * b.x, np.hypot(a.ux * b.x, a.x * b.ux))),
(B.copy(), C, Value(B.x * C.x, np.hypot(B.ux * C.x, B.x * C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_imul(x, y, r):
x *= y
assert isinstance(x, Value)
assert np.all(x.x == r.x)
assert np.all(x.ux == r.ux)
@pytest.mark.parametrize('x, y, r', [
(2, a, Value(2 / a.x, 2 * a.ux / a.x**2)),
(a, 2, Value(a.x / 2, a.ux / 2)),
(a, b, Value(a.x / b.x, np.hypot(a.ux / b.x, a.x * b.ux / b.x**2))),
(B, C, Value(B.x / C.x, np.hypot(B.ux / C.x, B.x * C.ux / C.x**2))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_div(x, y, r):
z = x / y
assert isinstance(z, Value)
assert np.all(z.x == r.x)
assert np.all(z.ux == r.ux)
@pytest.mark.parametrize('x, y, r', [
(2, a, Value(2 // a.x, 2 * a.ux // a.x**2)),
(a, 2, Value(a.x // 2, a.ux // 2)),
(a, b, Value(a.x // b.x, np.hypot(a.ux // b.x, a.x * b.ux // b.x**2))),
(B, C, Value(B.x // C.x, np.hypot(B.ux // C.x, B.x * C.ux // C.x**2))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_floordiv(x, y, r):
z = x // y
assert isinstance(z, Value)
assert np.all(z.x == r.x)
assert np.all(z.ux == r.ux)
@pytest.mark.parametrize('x, y, r', [
(2, a, Value(2 / a.x, 2 * a.ux / a.x**2)),
(a.copy(), 2, Value(a.x / 2, a.ux / 2)),
(a.copy(), b, Value(a.x / b.x, np.hypot(a.ux / b.x, a.x * b.ux / b.x**2))),
(B.copy(), C, Value(B.x / C.x, np.hypot(B.ux / C.x, B.x * C.ux / C.x**2))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_idiv(x, y, r):
x /= y
assert isinstance(x, Value)
assert np.all(x.x == r.x)
assert np.all(x.ux == r.ux)
@pytest.mark.parametrize('x, y, r', [
(2, a, Value(2 ** a.x, abs(2**a.x * np.log(2) * a.ux))),
(a, 2, Value(a.x ** 2, abs(2 * a.x**(2 - 1)) * a.ux)),
(a, b, Value(a.x ** b.x, np.hypot(b.x * a.x**(b.x-1) * a.ux, a.x**b.x * np.log(np.abs(a.x)) * b.ux))),
(B, C, Value(B.x ** C.x, np.hypot(C.x * B.x**(C.x-1) * B.ux, B.x**C.x * np.log(np.abs(B.x)) * C.ux)))
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_pow(x, y, r):
z = x**y
assert isinstance(z, Value)
assert np.all(z.x == r.x)
assert np.all(z.ux == r.ux)
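# A hedged reference formula (not part of uncvalue) for the first-order propagation
# rule that the arithmetic expectations above all reduce to, for independent x and y:
# u_f = hypot(|df/dx| * ux, |df/dy| * uy).
def _example_propagation(f, dfdx, dfdy, x, ux, y, uy):
    value = f(x, y)
    uncertainty = np.hypot(abs(dfdx(x, y)) * ux, abs(dfdy(x, y)) * uy)
    return value, uncertainty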
@pytest.mark.parametrize('x, r', [
(a, Value(-a.x, a.ux)),
(A, np.array([[-a, -a], [-b, -b], [-c, -c]])),
(B, Value(-B.x, B.ux))
], ids=['Value', 'Array of values', 'Value array'])
def test_neg(x, r):
z = -x
assert np.all(val(z) == val(r))
assert np.all(unc(z) == unc(r))
@pytest.mark.parametrize('x, r', [
(b, Value(abs(b.x), b.ux)),
(A, np.array([[a, a], [-b, -b], [c, c]])),
(B, Value(B.x, B.ux))
], ids=['Value', 'Array of values', 'Value array'])
def test_abs(x, r):
z = abs(x)
assert np.all(val(z) == val(r))
assert np.all(unc(z) == unc(r))
@pytest.mark.parametrize('x, r', [
(a, Value(1 / a.x, a.ux / a.x**2)),
(A, np.array([[1 / a, 1 / a], [1 / b, 1 / b], [1 / c, 1 / c]])),
(B, Value(1 / B.x, B.ux / B.x**2))
], ids=['Value', 'Array of values', 'Value array'])
def test_invert(x, r):
z = ~x
assert np.all(val(z) == val(r))
assert np.all(unc(z) == unc(r))
@pytest.mark.parametrize('x, y, r', [
(a.x, a, True),
(a, a.x, True),
(a, Value(a.x, a.ux * 5), True),
(a, b, False),
(a, a + 0.0001, False),
(A, A, True),
(B, C, False)],
ids=['Right', 'Left', 'Both', 'Different', 'Within unc', 'Array eq', 'Array dif'])
def test_equality(x, y, r):
assert np.all((x == y) == r)
assert np.all((x != y) != r)
@pytest.mark.parametrize('x, y, r', [
(a.x, a, True),
(a, a.x, True),
(a, Value(a.x, a.ux * 5), True),
(b, a, False),
(a, a - 0.0001, True),
(A, A, True),
(B, C, True)],
ids=['Right', 'Left', 'Both', 'Different', 'Within unc', 'Array eq', 'Array dif'])
def test_greater_equal(x, y, r):
assert np.all((x >= y) == r)
@pytest.mark.parametrize('x, y, r', [
(a.x, a, False),
(a, a.x, False),
(a, Value(a.x, a.ux * 5), False),
(b, a, False),
(a, a - 0.0001, True),
(A, A, False),
(B, C, True)],
ids=['Right', 'Left', 'Both', 'Different', 'Within unc', 'Array eq', 'Array dif'])
def test_greater(x, y, r):
assert np.all((x > y) == r)
@pytest.mark.parametrize('x, y, r', [
(a.x, a, True),
(a, a.x, True),
(a, Value(a.x, a.ux * 5), True),
(b, a, True),
(a, a - 0.0001, False),
(A, A, True),
(B, C, False)],
ids=['Right', 'Left', 'Both', 'Different', 'Within unc', 'Array eq', 'Array dif'])
def test_smaller_equal(x, y, r):
assert np.all((x <= y) == r)
@pytest.mark.parametrize('x, y, r', [
(1, Value(1, 2), True),
(1, Value(0.75, 0.05), False),
(0.8, Value(0.75, 0.08), True),
(0.8, Value(0.75, 0.05), True),
(0.7, Value(0.75, 0.05), True),
(Value(0.8, 0.2), Value(0.7, 0.04), False)],
ids=['Inside', 'Outside', 'Inside float', 'Over upper limit', 'Over lower limit', 'Outside second value'])
def test_contains(x, y, r):
assert (x in y) == r
@pytest.mark.parametrize('x, s', [
(Value(2, 2), '2.0 ± 2.0'),
(Value(0.2, 2), '0.2 ± 2.0'),
(Value(0.2, 0.002385), '(200.0 ± 2.4)·10^-3'),
(Value(0.02414, 0.002345), '(24.1 ± 2.3)·10^-3'),
(Value(0.02415, 0.002365), '(24.2 ± 2.4)·10^-3')])
def test_print(x, s):
assert str(x) == s
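# A hedged restatement of the display rule the cases above pin down: keep two
# significant figures of the uncertainty, round the value to the same decimal place,
# and pull out a shared power of ten when the leading digit is small.
def _example_display(x=0.02414, ux=0.002345):
    return str(Value(x, ux))   # expected: '(24.1 ± 2.3)·10^-3'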
@pytest.mark.parametrize('x, p, r', [
(0.145e-6, -8, 0.14e-6),
(0.001456, -4, 0.0015),
(0.0006666, -5, 0.00067),
(0.123, -2, 0.12),
(1, -1, 1.0),
(22.22, 0, 22.0),
(7684.65, 2, 77e2),
(17.8e8, 8, 18e8)])
def test_round(x, p, r):
v = Value(x, 10.0**p)
assert abs(round(v) - r) < ϵ
@pytest.mark.parametrize('x, p, r', [
(0.145e-6, -8, 0.14e-6),
(0.001456, -4, 0.0014),
(0.0006666, -5, 0.00066),
(0.123, -2, 0.12),
(1, -1, 1.0),
(22.22, 0, 22.0),
(7684.65, 2, 76e2),
(17.8e8, 8, 17e8)])
def test_trunc(x, p, r):
v = Value(x, 10.0**p)
assert abs(trunc(v) - r) < ϵ
@pytest.mark.parametrize('x, p, r', [
(0.145e-6, -8, 0.14e-6),
(0.001456, -4, 0.0014),
(0.0006666, -5, 0.00066),
(0.123, -2, 0.12),
(1, -1, 1.0),
(22.22, 0, 22.0),
(7684.65, 2, 76e2),
(17.8e8, 8, 17e8)])
def test_floor(x, p, r):
v = Value(x, 10.0**p)
assert abs(floor(v) - r) < ϵ
@pytest.mark.parametrize('x, p, r', [
(0.145e-6, -8, 0.15e-6),
(0.001456, -4, 0.0015),
(0.0006666, -5, 0.00067),
(0.123, -2, 0.13),
(1, -1, 1.0),
(22.22, 0, 23.0),
(7684.65, 2, 77e2),
(17.8e8, 8, 18e8)])
def test_ceil(x, p, r):
v = Value(x, 10.0**p)
assert abs(ceil(v) - r) < ϵ
def test_convert_to_number():
assert complex(a) == a.x + 0j
assert float(a) == a.x
assert int(a) == 3
assert bool(a)
assert not bool(Value(0, 0.0028))
def test_copy():
v = a.copy()
assert v.x == a.x
assert v.ux == a.ux
@pytest.mark.parametrize('ux, acc', [
(0.145e-6, -7),
(0.001456, -3),
(0.0006666, -4),
(0.123, -1),
(1, 0),
(22.22, 1),
(7684.65, 3),
(17.8e8, 9)])
def test_precision(ux, acc):
v = Value(1, ux)
assert v.precision() == acc
@pytest.mark.parametrize('x', [
a, abs(A), B
], ids=['Value', 'Array of values', 'Valued array'])
def test_log(x):
z = np.log(x)
assert np.all(val(z) == np.log(val(x)))
assert np.all(unc(z) == unc(x) / val(x))
@pytest.mark.parametrize('x', [
a, abs(A), B
], ids=['Value', 'Array of values', 'Valued array'])
def test_log2(x):
z = np.log2(x)
assert np.all(val(z) == np.log2(val(x)))
assert np.all(unc(z) == unc(x) / (val(x) * np.log(2)))
@pytest.mark.parametrize('x', [
a, abs(A), B
], ids=['Value', 'Array of values', 'Valued array'])
def test_log10(x):
z = np.log10(x)
assert np.all(val(z) == np.log10(val(x)))
assert np.all(unc(z) == unc(x) / (val(x) * np.log(10)))
@pytest.mark.parametrize('x', [
a, abs(A), B
], ids=['Value', 'Array of values', 'Valued array'])
def test_log1p(x):
z = np.log1p(x)
assert np.all(val(z) == np.log1p(val(x)))
assert np.all(unc(z) == unc(x) / (val(x) + 1))
@pytest.mark.parametrize('x', [
a, b, abs(A), B
], ids=['Value', 'Negative', 'Array of values', 'Valued array'])
def test_exp(x):
z = np.exp(x)
assert np.all(val(z) == np.exp(val(x)))
assert np.all(unc(z) == unc(x) * np.exp(val(x)))
@pytest.mark.parametrize('x', [
a, b, abs(A), B
], ids=['Value', 'Negative', 'Array of values', 'Valued array'])
def test_exp2(x):
z = np.exp2(x)
assert np.all(val(z) == np.exp2(val(x)))
assert np.all(unc(z) == unc(x) * np.exp2(val(x)) * np.log(2))
@pytest.mark.parametrize('x', [
a, b, abs(A), B
], ids=['Value', 'Negative', 'Array of values', 'Valued array'])
def test_expm1(x):
z = np.expm1(x)
assert np.all(val(z) == np.expm1(val(x)))
assert np.all(unc(z) == unc(x) * np.exp(val(x)))
@pytest.mark.parametrize('x', [
a, b, abs(A), B
], ids=['Value', 'Negative', 'Array of values', 'Valued array'])
def test_sin(x):
z = np.sin(x)
assert np.all(val(z) == np.sin(val(x)))
assert np.all(unc(z) == np.abs(unc(x) * np.cos(val(x))))
@pytest.mark.parametrize('x', [
a, b, abs(A), B
], ids=['Value', 'Negative', 'Array of values', 'Valued array'])
def test_cos(x):
z = np.cos(x)
assert np.all(val(z) == np.cos(val(x)))
assert np.all(unc(z) == np.abs(unc(x) * np.sin(val(x))))
@pytest.mark.parametrize('x', [
a, b, abs(A), B
], ids=['Value', 'Negative', 'Array of values', 'Valued array'])
def test_tan(x):
z = np.tan(x)
assert np.all(val(z) == np.tan(val(x)))
assert np.all(unc(z) == np.abs(unc(x) / np.cos(val(x))**2))
@pytest.mark.parametrize('x', [
a / 10, b / 10, abs(A) / 1000, B / 10
], ids=['Value', 'Negative', 'Array of values', 'Valued array'])
def test_arcsin(x):
z = np.arcsin(x)
assert np.all(val(z) == np.arcsin(val(x)))
assert np.all(unc(z) == np.abs(unc(x) / np.sqrt(1 - val(x)**2)))
@pytest.mark.parametrize('x', [
a / 10, b / 10, abs(A) / 1000, B / 10
], ids=['Value', 'Negative', 'Array of values', 'Valued array'])
def test_arccos(x):
z = np.arccos(x)
import numpy as np
import os
import absl
# %tensorflow_version 1.x
import tensorflow as tf
import pylab
from tensorflow.python.ops import parallel_for as pfor
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy.random as nrand
import pickle
from FixedPointStore import FixedPointStore
from FixedPointSearch import FixedPointSearch
import pickle
import tables
import time
from AdaptiveGradNormClip import AdaptiveGradNormClip
from AdaptiveLearningRate import AdaptiveLearningRate
from collections import namedtuple
LIFStateTuple = namedtuple('LIFStateTuple', ('v', 'z', 'i_future_buffer', 'z_buffer'))
ALIFStateTuple = namedtuple('ALIFStateTuple', ('z','v','b','i_future_buffer','z_buffer'))
import sys
sys.path.insert(0,'/content/LSNN-official/')
import lsnn.spiking_models as lsnn
from lsnn.toolbox.tensorflow_einsums.einsum_re_written import einsum_bij_jk_to_bik
# from lsnn.toolbox.rewiring_tools import rewiring_optimizer_wrapper
from lsnn.spiking_models import tf_cell_to_savable_dict, exp_convolve #ALIF, LIF
from lsnn.toolbox.rewiring_tools import weight_sampler, rewiring_optimizer_wrapper
class FlipFlop:
#Hyperparameters
hyp_dict = \
{'time' : 500,
'bits' : 3 ,
'num_steps' : 6,
'batch_size' : 64,
'neurons' : 64 ,
'num_classes' : 2,
'p': 0.2,
'learning_rate' :0.01,
'decay_steps': 100,
'c_type':'UGRNN'
}
'''
Architectures:
Vanilla, UG-RNN, GRU, LSTM
Activation:
Tanh, relu
Num_units:
64, 128, 256
L2 regularization:
1e-5, 1e-4, 1e-3, 1e-2
'''
def __init__(self,
time = hyp_dict['time'],
bits = hyp_dict['bits'],
num_steps = hyp_dict['num_steps'],
batch_size = hyp_dict['batch_size'],
neurons = hyp_dict['neurons'],
num_classes = hyp_dict['num_classes'],
learning_rate = hyp_dict['learning_rate'],
p = hyp_dict['p'],
c_type = hyp_dict['c_type'],
l2 = 0.01,
decay_steps = 0,
_seed = 400,
**hps):
self.seed = _seed
self.time = time
self.new_lr = 0
self.hps = hps
self.cell = None
self.activation = 'tanh'
self.lr_update = 0
self.bits = bits
self.model = None
self.fps = None
self.alr_hps = {}#{'initial_rate': 1.0, 'min_rate': 1e-5}
self.grad_global_norm = 0
self.grad_norm_clip_val = 0
self.dtype = tf.float32
self.opt = 'norm'
self.l2_loss = l2
self.decay_steps = decay_steps
# self.adaptive_learning_rate = AdaptiveLearningRate(**self.alr_hps)
# self.adaptive_grad_norm_clip = AdaptiveGradNormClip(**{})
self.num_steps = num_steps
self.batch_size = batch_size
self.neurons = neurons
self.num_classes = num_classes
self.learning_rate = learning_rate
self.p = p
self.graph = 0
self.chkpt = None
self.c_type = c_type
self.sess = 0
self.test_data = []
np.random.seed(self.seed)
def flip_flop(self, p = 0, plot=False):
unsigned_inp = np.random.binomial(1, p, [self.batch_size, self.time // 10, self.bits])
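# A hedged sketch, separate from the class above, of how flip-flop targets are
# typically derived from signed input pulses: each output bit holds the sign of
# the most recent nonzero pulse on its channel.
def _example_flipflop_targets(inputs):
    # inputs: (batch, time, bits) array with entries in {-1, 0, +1}
    targets = np.zeros(inputs.shape)
    state = np.zeros((inputs.shape[0], inputs.shape[2]))
    for t in range(inputs.shape[1]):
        pulse = inputs[:, t, :]
        state = np.where(pulse != 0, pulse, state)
        targets[:, t, :] = state
    return targets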
from typing import Callable
import numpy as np
from shapely.geometry import LineString, MultiLineString
from shapely.ops import unary_union
from .pattern import Pattern
from .geometry import Geometry
from .port import Port
from .typing import CurveLike, CurveTuple, Float4, Iterable, List, Optional, PathWidth, Union
from .utils import DECIMALS, linestring_points, MAX_GDS_POINTS, min_aspect_bounds
class Curve(Geometry):
"""A discrete curve consisting of points and tangents that used to define paths of varying widths.
Note:
In our definition of curve, we allow for multiple curve segments that are unconnected to each other.
Attributes:
curve: A function :math:`f(t) = (x(t), y(t))`, given :math:`t \\in [0, 1]`, or a length (float),
or a list of points, or a tuple of points and tangents.
resolution: Number of evaluations to define :math:`f(t)` (number of points in the curve).
"""
def __init__(self, *curves: Union[float, "Curve", CurveLike, List[CurveLike]]):
points, tangents = get_ndarray_curve(curves)
super().__init__(points, {}, [], tangents)
self.port = self.path_port()
def angles(self, path: bool = True):
"""Calculate the angles for the tangents along the curve.
Args:
path: Whether to report the angles for the full coalesced curve.
Returns:
The angles of the tangents along the curve.
"""
if path:
t = np.hstack(self.tangents)
return np.unwrap(np.arctan2(t[1], t[0]))
else:
return [np.unwrap(np.arctan2(t[1], t[0])) for t in self.tangents]
def total_length(self, path: bool = True):
"""Calculate the total length at the end of each line segment of the curve.
Args:
path: Whether to report the lengths of the segments for the full coalesced curve.
Returns:
The lengths for the individual line segments of the curve.
"""
if path:
return np.cumsum(self.lengths())
else:
return [np.cumsum(p) for p in self.lengths(path=False)]
def lengths(self, path: bool = True):
"""Calculate the lengths of each line segment of the curve.
Args:
path: Whether to report the lengths of the segments for the full coalesced curve.
Returns:
The lengths for the individual line segments of the curve.
"""
if path:
return np.linalg.norm(np.diff(self.points), axis=0)
else:
return [np.linalg.norm(np.diff(p), axis=0) for p in self.geoms]
@property
def pathlength(self):
return np.sum(self.lengths(path=True))
def curvature(self, path: bool = True, min_frac: float = 1e-3):
"""Calculate the curvature vs length along the curve.
Args:
path: Whether to report the curvature vs length for the full coalesced curve.
min_frac: The minimum segment length, as a fraction of the mean segment length, below which curvature is not evaluated.
Returns:
A tuple of the lengths and curvature along the length.
"""
min_dist = min_frac * np.mean(self.lengths(path=True))
if path:
d, a = self.lengths(path=True), self.angles(path=True)
return np.cumsum(d)[d > min_dist], np.diff(a)[d > min_dist] / d[d > min_dist]
else:
return [(np.cumsum(d)[d > min_dist], np.diff(a)[d > min_dist] / d[d > min_dist])
for d, a in zip(self.lengths(path=False), self.angles(path=False))]
@property
def normals(self):
"""Calculate the normals (perpendicular to the tangents) along the curve.
Returns:
The normals for the curve.
"""
return [np.vstack((-np.sin(a), np.cos(a))) for a in self.angles()]
def path_port(self, w: float = 1):
"""Get the port and orientations from the normals of the curve assuming it is a piecewise path.
Note:
This function will not make sense if there are multiple unconnected curves.
This is generally reserved for path-related operations.
Unexpected behavior will occur if this method is used for arbitrary curve sets.
Args:
w: width of the port.
Returns:
The ports for the curve.
"""
n = self.normals
n = (n[0].T[0], n[-1].T[-1])
p = (self.geoms[0].T[0], self.geoms[-1].T[-1])
return {
'a0': Port.from_points(np.array((p[0] + n[0] * w / 2, p[0] - n[0] * w / 2))),
'b0': Port.from_points(np.array((p[1] - n[1] * w / 2, p[1] + n[1] * w / 2)))
}
@property
def shapely(self):
"""Shapely geometry
Returns:
The multiline string for the geometries.
"""
return MultiLineString([LineString(p.T) for p in self.geoms])
def coalesce(self):
"""Coalesce path segments into a single path
Note:
Caution: This assumes a C1 path, so paths with discontinuities will have incorrect tangents.
Returns:
The coalesced Curve.
"""
self.geoms = [self.points]
self.tangents = [np.hstack(self.tangents)]
return self
@property
def interpolated(self):
"""Interpolated curve such that all segments have equal length.
Returns:
The interpolated path.
"""
lengths = [np.sum(length) for length in self.lengths(path=False)]
# interpolate, but also ensure endpoints have the correct original tangents
def _interp(g: np.ndarray, t: np.ndarray, p: LineString, length: float):
ls = LineString([p.interpolate(d * length) for d in np.linspace(0, 1, g.shape[1])])
points = linestring_points(ls).T
tangents = np.gradient(points, axis=1).T
tangents = np.vstack((t.T[0], tangents[1:-1], t.T[-1])).T
return CurveTuple(points, tangents)
return Curve([_interp(g, t, p, length)
for g, t, p, length in zip(self.geoms, self.tangents, self.shapely.geoms, lengths)])
def path(self, width: Union[float, Iterable[PathWidth]] = 1, offset: Union[float, Iterable[PathWidth]] = 0,
decimals: int = DECIMALS) -> Pattern:
"""Path (pattern) converted from this curve using width and offset specifications.
Args:
width: Width of the path. If a list of callables, apply a parametric width to each curve segment.
offset: Offset of the path. If a list of callables, apply a parametric offset to each curve segment.
decimals: Decimal precision of the path.
Returns:
A pattern representing the path.
"""
path_patterns = []
widths = [width] * self.num_geoms if not isinstance(width, list) and not isinstance(width, tuple) else width
offsets = [offset] * self.num_geoms if not isinstance(offset, list) and not isinstance(offset,
tuple) else offset
if not len(widths) == self.num_geoms:
raise AttributeError(f"Expected len(widths) == self.num_geoms, but got {len(widths)} != {self.num_geoms}")
if not len(offsets) == self.num_geoms:
raise AttributeError(f"Expected len(offsets) == self.num_geoms, but got {len(offsets)} != {self.num_geoms}")
for segment, tangent, width, offset in zip(self.geoms, self.tangents, widths, offsets):
if callable(width):
t = np.linspace(0, 1, segment.shape[1])[:, np.newaxis]
width = width(t)
if callable(offset):
t = np.linspace(0, 1, segment.shape[1])[:, np.newaxis]
offset = offset(t)
path_patterns.append(curve_to_path(segment, width, tangent, offset, decimals))
path = Pattern(path_patterns).set_port({'a0': path_patterns[0].port['a0'],
'b0': path_patterns[-1].port['b0']})
path.curve = self
# path.refs.append(path.curve)
return path
def hvplot(self, line_width: float = 2, color: str = 'black', bounds: Optional[Float4] = None, alternate_color: Optional[str] = None,
plot_ports: bool = True):
"""Plot this device on a matplotlib plot.
Args:
line_width: The width of the line for plotting.
color: The color for plotting the pattern.
alternate_color: Plot segments of the curve alternating :code:`color` and :code`alternate_color`.
bounds: Bounds of the plot.
plot_ports: Plot the ports of the curve.
Returns:
The holoviews Overlay for displaying all of the polygons.
"""
import holoviews as hv
plots_to_overlay = []
alternate_color = color if not alternate_color else alternate_color
b = min_aspect_bounds(self.bounds) if bounds is None else bounds
for i, curve in enumerate(self.geoms):
plots_to_overlay.append(
hv.Curve((curve[0], curve[1])).opts(data_aspect=1, frame_height=200, line_width=line_width,
ylim=(b[1], b[3]), xlim=(b[0], b[2]),
color=(color, alternate_color)[i % 2], tools=['hover']))
if plot_ports:
for name, port in self.port.items():
plots_to_overlay.append(port.hvplot(name))
return hv.Overlay(plots_to_overlay)
@property
def pattern(self):
return Pattern(self.geoms)
@property
def segments(self):
return [Curve(CurveTuple(g, t)) for g, t in zip(self.geoms, self.tangents)]
@property
def copy(self) -> "Curve":
"""Copies the pattern using deepcopy.
Returns:
A copy of the Pattern so that changes do not propagate to the original :code:`Pattern`.
"""
curve = Curve([CurveTuple(g, t) for g, t in zip(self.geoms, self.tangents)])
curve.port = self.port_copy
curve.refs = [ref.copy for ref in self.refs]
return curve
def curve_to_path(points: np.ndarray, widths: Union[float, np.ndarray], tangents: np.ndarray,
offset: Union[float, np.ndarray] = 0, decimals: int = DECIMALS,
max_num_points: int = MAX_GDS_POINTS):
"""Converts a curve to a path.
Args:
points: The points along the curve.
tangents: The normal directions / derivatives evaluated at the points along the curve.
widths: The widths at each point along the curve (measured perpendicular to the tangents).
offset: Offset of the path.
decimals: Number of decimals precision for the curve output.
max_num_points: Maximum number of points allowed in the curve (otherwise, break it apart).
Note that the polygon will have twice this amount.
Returns:
The resulting Pattern.
"""
# step 1: find the path polygon points based on the points, tangents, widths, and offset
angles = np.arctan2(tangents[1], tangents[0])
w = np.vstack((-np.sin(angles) * widths, np.cos(angles) * widths))
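# A hedged usage sketch of the parametric-width API described in Curve.path above:
# a straight 100-point curve turned into a linear taper. The CurveTuple construction
# mirrors how Curve.segments builds single-segment curves; exact behaviour of the
# downstream path conversion is assumed rather than verified here.
def _example_linear_taper():
    x = np.linspace(0, 10, 100)
    points = np.vstack((x, np.zeros_like(x)))
    tangents = np.tile(np.array([[1.0], [0.0]]), (1, 100))
    curve = Curve(CurveTuple(points, tangents))
    return curve.path(width=[lambda t: 0.5 + 1.5 * t])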
# -*- coding: utf-8 -*-
"""Autoregressive model for multivariate time series outlier detection.
"""
import numpy as np
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import column_or_1d
from .CollectiveBase import CollectiveBaseDetector
from combo.models.score_comb import average, maximization, median, aom, moa
from combo.utils.utility import standardizer
from .AutoRegOD import AutoRegOD
from .utility import get_sub_sequences_length
class MultiAutoRegOD(CollectiveBaseDetector):
"""Autoregressive models use linear regression to calculate a sample's
deviance from the predicted value, which is then used as its
outlier score. This model is for multivariate time series.
This model handles multivariate time series by various combination
approaches. See AutoRegOD for univariate data.
See :cite:`aggarwal2015outlier,zhao2020using` for details.
Parameters
----------
window_size : int
The moving window size.
step_size : int, optional (default=1)
The displacement for moving window.
contamination : float in (0., 0.5), optional (default=0.1)
The amount of contamination of the data set, i.e.
the proportion of outliers in the data set. When fitting this is used
to define the threshold on the decision function.
method : str, optional (default='average')
Combination method: {'average', 'maximization',
'median'}. Pass in weights of detector for weighted version.
weights : numpy array of shape (1, n_dimensions)
Score weight by dimensions.
Attributes
----------
decision_scores_ : numpy array of shape (n_samples,)
The outlier scores of the training data.
The higher, the more abnormal. Outliers tend to have higher
scores. This value is available once the detector is
fitted.
labels_ : int, either 0 or 1
The binary labels of the training data. 0 stands for inliers
and 1 for outliers/anomalies. It is generated by applying
``threshold_`` on ``decision_scores_``.
"""
def __init__(self, window_size, step_size=1, method='average',
weights=None, contamination=0.1):
super(MultiAutoRegOD, self).__init__(contamination=contamination)
self.window_size = window_size
self.step_size = step_size
self.method = method
self.weights = weights
def _validate_weights(self):
"""Internal function for validating and adjust weights.
Returns
-------
"""
if self.weights is None:
self.weights = np.ones([1, self.n_models_])
else:
self.weights = column_or_1d(self.weights).reshape(
1, len(self.weights))
assert (self.weights.shape[1] == self.n_models_)
# adjust probability by a factor for integrity
adjust_factor = self.weights.shape[1] / np.sum(self.weights)
self.weights = self.weights * adjust_factor
def _fit_univariate_model(self, X):
"""Internal function for fitting one dimensional ts.
"""
X = check_array(X)
n_samples, n_sequences = X.shape[0], X.shape[1]
models = []
# train one model for each dimension
for i in range(n_sequences):
models.append(AutoRegOD(window_size=self.window_size,
step_size=self.step_size,
contamination=self.contamination))
models[i].fit(X[:, i].reshape(-1, 1))
return models
def _score_combination(self, scores): # pragma: no cover
"""Internal function for combining univarite scores.
"""
# combine by different approaches
if self.method == 'average':
return average(scores, estimator_weights=self.weights)
if self.method == 'maximization':
return maximization(scores)
if self.method == 'median':
return median(scores)
def fit(self, X: np.array) -> object:
"""Fit detector. y is ignored in unsupervised methods.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
X = check_array(X).astype(np.float64)
# fit each dimension individually
self.models_ = self._fit_univariate_model(X)
self.valid_len_ = self.models_[0].valid_len_
self.n_models_ = len(self.models_)
# assign the left and right inds, same for all models
self.left_inds_ = self.models_[0].left_inds_
self.right_inds_ = self.models_[0].right_inds_
# validate and adjust weights
self._validate_weights()
# combine the scores from all dimensions
self._decison_mat = np.zeros([self.valid_len_, self.n_models_])
for i in range(self.n_models_):
self._decison_mat[:, i] = self.models_[i].decision_scores_
# scale scores by standardization before score combination
self._decison_mat_scalaled, self._score_scalar = standardizer(
self._decison_mat, keep_scalar=True)
self.decision_scores_ = self._score_combination(
self._decison_mat_scalaled)
self._process_decision_scores()
return self
def decision_function(self, X: np.array):
"""Predict raw anomaly scores of X using the fitted detector.
The anomaly score of an input sample is computed based on the fitted
detector. For consistency, outliers are assigned with
higher anomaly scores.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples. Sparse matrices are accepted only
if they are supported by the base estimator.
Returns
-------
anomaly_scores : numpy array of shape (n_samples,)
The anomaly score of the input samples.
"""
check_is_fitted(self, ['models_'])
X = check_array(X).astype(np.float64)
assert (X.shape[1] == self.n_models_)
n_samples = len(X)
# subtract 1 because one extra sample is needed to form the prediction target y
valid_len = get_sub_sequences_length(n_samples, self.window_size,
self.step_size) - 1
# combine the scores from all dimensions
decison_mat = np.zeros([valid_len, self.n_models_])
for i in range(self.n_models_):
decison_mat[:, i], X_left_inds, X_right_inds = \
self.models_[i].decision_function(X[:, i].reshape(-1, 1))
# scale the decision mat
decison_mat_scaled = self._score_scalar.transform(decison_mat)
decision_scores = self._score_combination(decison_mat_scaled)
# print(decision_scores.shape, X_left_inds.shape, X_right_inds.shape)
decision_scores = np.concatenate((np.zeros((self.window_size,)), decision_scores))  # pad the first window, which has no prediction
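# A hedged usage sketch of the detector above on synthetic data (sizes made up):
# one AR model per dimension, scores standardized and averaged across dimensions.
def _example_multi_autoreg():
    rng = np.random.RandomState(0)
    X = rng.randn(300, 3).cumsum(axis=0)    # three random-walk-like series
    X[250] += 20.0                           # inject a collective anomaly
    detector = MultiAutoRegOD(window_size=10, step_size=1, method='average')
    detector.fit(X)
    return detector.decision_scores_, detector.labels_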
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for HDF5 object
"""
from __future__ import absolute_import, division, print_function
import os
import copy
import numpy as np
import nose.tools as nt
from astropy.time import Time
from pyuvdata import UVData
import pyuvdata.utils as uvutils
from pyuvdata.data import DATA_PATH
import pyuvdata.tests as uvtest
try:
import h5py
from pyuvdata import uvh5
from pyuvdata.uvh5 import _hera_corr_dtype
except(ImportError):
pass
@uvtest.skipIf_no_h5py
def test_ReadMiriadWriteUVH5ReadUVH5():
"""
Miriad round trip test
"""
uv_in = UVData()
uv_out = UVData()
miriad_file = os.path.join(DATA_PATH, 'zen.2456865.60537.xy.uvcRREAA')
testfile = os.path.join(DATA_PATH, 'test', 'outtest_miriad.uvh5')
uvtest.checkWarnings(uv_in.read_miriad, [miriad_file],
nwarnings=1, category=[UserWarning],
message=['Altitude is not present'])
uv_in.write_uvh5(testfile, clobber=True)
uv_out.read(testfile)
nt.assert_equal(uv_in, uv_out)
# also test round-tripping phased data
uv_in.phase_to_time(Time(np.mean(uv_in.time_array), format='jd'))
uv_in.write_uvh5(testfile, clobber=True)
uv_out.read(testfile)
nt.assert_equal(uv_in, uv_out)
# clean up
os.remove(testfile)
return
@uvtest.skipIf_no_h5py
def test_ReadUVFITSWriteUVH5ReadUVH5():
"""
UVFITS round trip test
"""
uv_in = UVData()
uv_out = UVData()
uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
testfile = os.path.join(DATA_PATH, 'test', 'outtest_uvfits.uvh5')
uvtest.checkWarnings(uv_in.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
uv_in.write_uvh5(testfile, clobber=True)
uvtest.checkWarnings(uv_out.read, [testfile], message='Telescope EVLA is not')
nt.assert_equal(uv_in, uv_out)
# also test writing double-precision data_array
uv_in.data_array = uv_in.data_array.astype(np.complex128)
uv_in.write_uvh5(testfile, clobber=True)
uvtest.checkWarnings(uv_out.read, [testfile], message='Telescope EVLA is not')
nt.assert_equal(uv_in, uv_out)
# clean up
os.remove(testfile)
return
@uvtest.skipIf_no_h5py
def test_ReadUVH5Errors():
"""
Test raising errors in read function
"""
uv_in = UVData()
fake_file = os.path.join(DATA_PATH, 'fake_file.uvh5')
nt.assert_raises(IOError, uv_in.read_uvh5, fake_file)
nt.assert_raises(ValueError, uv_in.read_uvh5, ['list of', 'fake files'], read_data=False)
return
@uvtest.skipIf_no_h5py
def test_WriteUVH5Errors():
"""
Test raising errors in write_uvh5 function
"""
uv_in = UVData()
uv_out = UVData()
uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
uvtest.checkWarnings(uv_in.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
testfile = os.path.join(DATA_PATH, 'test', 'outtest_uvfits.uvh5')
with open(testfile, 'a'):
os.utime(testfile, None)
# assert IOError if file exists
nt.assert_raises(IOError, uv_in.write_uvh5, testfile, clobber=False)
# use clobber=True to write out anyway
uv_in.write_uvh5(testfile, clobber=True)
uvtest.checkWarnings(uv_out.read, [testfile], message='Telescope EVLA is not')
nt.assert_equal(uv_in, uv_out)
# clean up
os.remove(testfile)
return
@uvtest.skipIf_no_h5py
def test_UVH5OptionalParameters():
"""
Test reading and writing optional parameters not in sample files
"""
uv_in = UVData()
uv_out = UVData()
uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
uvtest.checkWarnings(uv_in.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
testfile = os.path.join(DATA_PATH, 'test', 'outtest_uvfits.uvh5')
# set optional parameters
uv_in.x_orientation = 'east'
uv_in.antenna_diameters = np.ones_like(uv_in.antenna_numbers) * 1.
uv_in.uvplane_reference_time = 0
# write out and read back in
uv_in.write_uvh5(testfile, clobber=True)
uvtest.checkWarnings(uv_out.read, [testfile], message='Telescope EVLA is not')
nt.assert_equal(uv_in, uv_out)
# clean up
os.remove(testfile)
return
@uvtest.skipIf_no_h5py
def test_UVH5CompressionOptions():
"""
Test writing data with compression filters
"""
uv_in = UVData()
uv_out = UVData()
uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
uvtest.checkWarnings(uv_in.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
testfile = os.path.join(DATA_PATH, 'test', 'outtest_uvfits_compression.uvh5')
# write out and read back in
uv_in.write_uvh5(testfile, clobber=True, data_compression="lzf",
flags_compression=None, nsample_compression=None)
uvtest.checkWarnings(uv_out.read, [testfile], message='Telescope EVLA is not')
nt.assert_equal(uv_in, uv_out)
# clean up
os.remove(testfile)
return
@uvtest.skipIf_no_h5py
def test_UVH5ReadMultiple_files():
"""
Test reading multiple uvh5 files
"""
uv_full = UVData()
uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
testfile1 = os.path.join(DATA_PATH, 'test/uv1.uvh5')
testfile2 = os.path.join(DATA_PATH, 'test/uv2.uvh5')
uvtest.checkWarnings(uv_full.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
uv1 = copy.deepcopy(uv_full)
uv2 = copy.deepcopy(uv_full)
uv1.select(freq_chans=np.arange(0, 32))
uv2.select(freq_chans=np.arange(32, 64))
uv1.write_uvh5(testfile1, clobber=True)
uv2.write_uvh5(testfile2, clobber=True)
uvtest.checkWarnings(uv1.read, [[testfile1, testfile2]], nwarnings=2,
message='Telescope EVLA is not')
# Check history is correct, before replacing and doing a full object check
nt.assert_true(uvutils._check_histories(uv_full.history + ' Downselected to '
'specific frequencies using pyuvdata. '
'Combined data along frequency axis using'
' pyuvdata.', uv1.history))
uv1.history = uv_full.history
nt.assert_equal(uv1, uv_full)
# clean up
os.remove(testfile1)
os.remove(testfile2)
return
@uvtest.skipIf_no_h5py
def test_UVH5ReadMultiple_files_axis():
"""
Test reading multiple uvh5 files with setting axis
"""
uv_full = UVData()
uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
testfile1 = os.path.join(DATA_PATH, 'test/uv1.uvh5')
testfile2 = os.path.join(DATA_PATH, 'test/uv2.uvh5')
uvtest.checkWarnings(uv_full.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
uv1 = copy.deepcopy(uv_full)
uv2 = copy.deepcopy(uv_full)
uv1.select(freq_chans=np.arange(0, 32))
uv2.select(freq_chans=np.arange(32, 64))
uv1.write_uvh5(testfile1, clobber=True)
uv2.write_uvh5(testfile2, clobber=True)
uvtest.checkWarnings(uv1.read, [[testfile1, testfile2]], {'axis': 'freq'},
nwarnings=2, message='Telescope EVLA is not')
# Check history is correct, before replacing and doing a full object check
nt.assert_true(uvutils._check_histories(uv_full.history + ' Downselected to '
'specific frequencies using pyuvdata. '
'Combined data along frequency axis using'
' pyuvdata.', uv1.history))
uv1.history = uv_full.history
nt.assert_equal(uv1, uv_full)
# clean up
os.remove(testfile1)
os.remove(testfile2)
return
@uvtest.skipIf_no_h5py
def test_UVH5PartialRead():
"""
Test reading in only part of a dataset from disk
"""
uvh5_uv = UVData()
uvh5_uv2 = UVData()
uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
uvtest.checkWarnings(uvh5_uv.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
testfile = os.path.join(DATA_PATH, 'test', 'outtest.uvh5')
uvh5_uv.telescope_name = 'PAPER'
uvh5_uv.write_uvh5(testfile, clobber=True)
# select on antennas
ants_to_keep = np.array([0, 19, 11, 24, 3, 23, 1, 20, 21])
uvh5_uv.read(testfile, antenna_nums=ants_to_keep)
uvh5_uv2.read(testfile)
uvh5_uv2.select(antenna_nums=ants_to_keep)
nt.assert_equal(uvh5_uv, uvh5_uv2)
# select on frequency channels
chans_to_keep = np.arange(12, 22)
uvh5_uv.read(testfile, freq_chans=chans_to_keep)
uvh5_uv2.read(testfile)
uvh5_uv2.select(freq_chans=chans_to_keep)
nt.assert_equal(uvh5_uv, uvh5_uv2)
# select on pols
pols_to_keep = [-1, -2]
uvh5_uv.read(testfile, polarizations=pols_to_keep)
uvh5_uv2.read(testfile)
uvh5_uv2.select(polarizations=pols_to_keep)
nt.assert_equal(uvh5_uv, uvh5_uv2)
# select on read using time_range
unique_times = np.unique(uvh5_uv.time_array)
uvtest.checkWarnings(uvh5_uv.read, [testfile],
{'time_range': [unique_times[0], unique_times[1]]},
message=['Warning: "time_range" keyword is set'])
uvh5_uv2.read(testfile)
uvh5_uv2.select(times=unique_times[0:2])
nt.assert_equal(uvh5_uv, uvh5_uv2)
# now test selecting on multiple axes
# frequencies first
uvh5_uv.read(testfile, antenna_nums=ants_to_keep, freq_chans=chans_to_keep,
polarizations=pols_to_keep)
uvh5_uv2.read(testfile)
uvh5_uv2.select(antenna_nums=ants_to_keep, freq_chans=chans_to_keep,
polarizations=pols_to_keep)
nt.assert_equal(uvh5_uv, uvh5_uv2)
# baselines first
ants_to_keep = np.array([0, 1])
uvh5_uv.read(testfile, antenna_nums=ants_to_keep, freq_chans=chans_to_keep,
polarizations=pols_to_keep)
uvh5_uv2.read(testfile)
uvh5_uv2.select(antenna_nums=ants_to_keep, freq_chans=chans_to_keep,
polarizations=pols_to_keep)
nt.assert_equal(uvh5_uv, uvh5_uv2)
# polarizations first
ants_to_keep = np.array([0, 1, 2, 3, 6, 7, 8, 11, 14, 18, 19, 20, 21, 22])
chans_to_keep = np.arange(12, 64)
uvh5_uv.read(testfile, antenna_nums=ants_to_keep, freq_chans=chans_to_keep,
polarizations=pols_to_keep)
uvh5_uv2.read(testfile)
uvh5_uv2.select(antenna_nums=ants_to_keep, freq_chans=chans_to_keep,
polarizations=pols_to_keep)
nt.assert_equal(uvh5_uv, uvh5_uv2)
# clean up
os.remove(testfile)
return
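# A hedged summary sketch (not itself a test) of the partial-write protocol exercised
# by test_UVH5PartialWrite below: write the metadata-only skeleton once, then fill the
# data, flag, and nsample datasets one baseline-polarization key at a time.
def _example_partial_write(full_uv, filename):
    partial = copy.deepcopy(full_uv)
    partial.data_array = None
    partial.flag_array = None
    partial.nsample_array = None
    partial.initialize_uvh5_file(filename, clobber=True)
    for key in full_uv.get_antpairpols():
        partial.write_uvh5_part(filename,
                                full_uv.get_data(key, squeeze='none'),
                                full_uv.get_flags(key, squeeze='none'),
                                full_uv.get_nsamples(key, squeeze='none'),
                                bls=key)
    return partial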
@uvtest.skipIf_no_h5py
def test_UVH5PartialWrite():
"""
Test writing an entire UVH5 file in pieces
"""
full_uvh5 = UVData()
partial_uvh5 = UVData()
uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
uvtest.checkWarnings(full_uvh5.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
testfile = os.path.join(DATA_PATH, 'test', 'outtest.uvh5')
full_uvh5.telescope_name = "PAPER"
# cut down the file size to decrease testing time
full_uvh5.select(antenna_nums=[3, 7, 24])
full_uvh5.lst_array = uvutils.get_lst_for_time(full_uvh5.time_array,
*full_uvh5.telescope_location_lat_lon_alt_degrees)
full_uvh5.write_uvh5(testfile, clobber=True)
full_uvh5.read(testfile)
# delete data arrays in partial file
partial_uvh5 = copy.deepcopy(full_uvh5)
partial_uvh5.data_array = None
partial_uvh5.flag_array = None
partial_uvh5.nsample_array = None
# initialize file on disk
partial_testfile = os.path.join(DATA_PATH, 'test', 'outtest_partial.uvh5')
partial_uvh5.initialize_uvh5_file(partial_testfile, clobber=True)
# write to file by iterating over antpairpol
antpairpols = full_uvh5.get_antpairpols()
for key in antpairpols:
data = full_uvh5.get_data(key, squeeze='none')
flags = full_uvh5.get_flags(key, squeeze='none')
nsamples = full_uvh5.get_nsamples(key, squeeze='none')
partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
bls=key)
# now read in the full file and make sure that it matches the original
partial_uvh5.read(partial_testfile)
nt.assert_equal(full_uvh5, partial_uvh5)
# test add_to_history
key = antpairpols[0]
data = full_uvh5.get_data(key, squeeze='none')
flags = full_uvh5.get_flags(key, squeeze='none')
nsamples = full_uvh5.get_nsamples(key, squeeze='none')
partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
bls=key, add_to_history="foo")
partial_uvh5.read(partial_testfile, read_data=False)
nt.assert_true('foo' in partial_uvh5.history)
# start over, and write frequencies
partial_uvh5 = copy.deepcopy(full_uvh5)
partial_uvh5.data_array = None
partial_uvh5.flag_array = None
partial_uvh5.nsample_array = None
# initialize file on disk
partial_uvh5.initialize_uvh5_file(partial_testfile, clobber=True)
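    # split the frequency axis into two halves and write each half with a separate call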
Nfreqs = full_uvh5.Nfreqs
Hfreqs = Nfreqs // 2
freqs1 = np.arange(Hfreqs)
freqs2 = np.arange(Hfreqs, Nfreqs)
data = full_uvh5.data_array[:, :, freqs1, :]
flags = full_uvh5.flag_array[:, :, freqs1, :]
nsamples = full_uvh5.nsample_array[:, :, freqs1, :]
partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
freq_chans=freqs1)
data = full_uvh5.data_array[:, :, freqs2, :]
flags = full_uvh5.flag_array[:, :, freqs2, :]
nsamples = full_uvh5.nsample_array[:, :, freqs2, :]
partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
freq_chans=freqs2)
# read in the full file and make sure it matches
partial_uvh5.read(partial_testfile)
nt.assert_equal(full_uvh5, partial_uvh5)
# start over, write chunks of blts
partial_uvh5 = copy.deepcopy(full_uvh5)
partial_uvh5.data_array = None
partial_uvh5.flag_array = None
partial_uvh5.nsample_array = None
# initialize file on disk
partial_uvh5.initialize_uvh5_file(partial_testfile, clobber=True)
Nblts = full_uvh5.Nblts
Hblts = Nblts // 2
blts1 = np.arange(Hblts)
blts2 = np.arange(Hblts, Nblts)
data = full_uvh5.data_array[blts1, :, :, :]
flags = full_uvh5.flag_array[blts1, :, :, :]
nsamples = full_uvh5.nsample_array[blts1, :, :, :]
partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
blt_inds=blts1)
data = full_uvh5.data_array[blts2, :, :, :]
flags = full_uvh5.flag_array[blts2, :, :, :]
nsamples = full_uvh5.nsample_array[blts2, :, :, :]
partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
blt_inds=blts2)
# read in the full file and make sure it matches
partial_uvh5.read(partial_testfile)
nt.assert_equal(full_uvh5, partial_uvh5)
# start over, write groups of pols
partial_uvh5 = copy.deepcopy(full_uvh5)
partial_uvh5.data_array = None
partial_uvh5.flag_array = None
partial_uvh5.nsample_array = None
# initialize file on disk
partial_uvh5.initialize_uvh5_file(partial_testfile, clobber=True)
Npols = full_uvh5.Npols
Hpols = Npols // 2
pols1 = np.arange(Hpols)
pols2 = np.arange(Hpols, Npols)
data = full_uvh5.data_array[:, :, :, pols1]
flags = full_uvh5.flag_array[:, :, :, pols1]
nsamples = full_uvh5.nsample_array[:, :, :, pols1]
partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
polarizations=full_uvh5.polarization_array[:Hpols])
data = full_uvh5.data_array[:, :, :, pols2]
flags = full_uvh5.flag_array[:, :, :, pols2]
nsamples = full_uvh5.nsample_array[:, :, :, pols2]
partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
polarizations=full_uvh5.polarization_array[Hpols:])
# read in the full file and make sure it matches
partial_uvh5.read(partial_testfile)
nt.assert_equal(full_uvh5, partial_uvh5)
# clean up
os.remove(testfile)
os.remove(partial_testfile)
return
@uvtest.skipIf_no_h5py
def test_UVH5PartialWriteIrregular():
"""
Test writing a uvh5 file using irregular intervals
"""
def initialize_with_zeros(uvd, filename):
"""
Initialize a file with all zeros for data arrays
"""
uvd.initialize_uvh5_file(filename, clobber=True)
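        # UVH5 data arrays are laid out as (Nblts, Nspws, Nfreqs, Npols); Nspws is 1 here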
data_shape = (uvd.Nblts, 1, uvd.Nfreqs, uvd.Npols)
data = np.zeros(data_shape, dtype=np.complex64)
flags = np.zeros(data_shape, dtype=np.bool)
nsamples = np.zeros(data_shape, dtype=np.float32)
        with h5py.File(filename, 'r+') as f:
            dgrp = f['/Data']
            data_dset = dgrp['visdata']
            flags_dset = dgrp['flags']
            nsample_dset = dgrp['nsamples']
            # assign into the datasets in place; rebinding the names would leave the file untouched
            data_dset[:, :, :, :] = data
            flags_dset[:, :, :, :] = flags
            nsample_dset[:, :, :, :] = nsamples
return
full_uvh5 = UVData()
partial_uvh5 = UVData()
uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits')
uvtest.checkWarnings(full_uvh5.read_uvfits, [uvfits_file], message='Telescope EVLA is not')
testfile = os.path.join(DATA_PATH, 'test', 'outtest.uvh5')
full_uvh5.telescope_name = "PAPER"
full_uvh5.write_uvh5(testfile, clobber=True)
full_uvh5.read(testfile)
# delete data arrays in partial file
partial_uvh5 = copy.deepcopy(full_uvh5)
partial_uvh5.data_array = None
partial_uvh5.flag_array = None
partial_uvh5.nsample_array = None
# initialize file on disk
partial_testfile = os.path.join(DATA_PATH, 'test', 'outtest_partial.uvh5')
initialize_with_zeros(partial_uvh5, partial_testfile)
# make a mostly empty object in memory to match what we'll write to disk
partial_uvh5.data_array = np.zeros_like(full_uvh5.data_array, dtype=np.complex64)
partial_uvh5.flag_array = np.zeros_like(full_uvh5.flag_array, dtype=np.bool)
partial_uvh5.nsample_array = np.zeros_like(full_uvh5.nsample_array, dtype=np.float32)
# write a single blt to file
blt_inds = np.arange(1)
data = full_uvh5.data_array[blt_inds, :, :, :]
flags = full_uvh5.flag_array[blt_inds, :, :, :]
nsamples = full_uvh5.nsample_array[blt_inds, :, :, :]
partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples, blt_inds=blt_inds)
# also write the arrays to the partial object
partial_uvh5.data_array[blt_inds, :, :, :] = data
partial_uvh5.flag_array[blt_inds, :, :, :] = flags
partial_uvh5.nsample_array[blt_inds, :, :, :] = nsamples
# read in the file and make sure it matches
partial_uvh5_file = UVData()
partial_uvh5_file.read(partial_testfile)
nt.assert_equal(partial_uvh5_file, partial_uvh5)
# do it again, with a single frequency
# reinitialize
partial_uvh5 = copy.deepcopy(full_uvh5)
partial_uvh5.data_array = None
partial_uvh5.flag_array = None
partial_uvh5.nsample_array = None
# initialize file on disk
partial_testfile = os.path.join(DATA_PATH, 'test', 'outtest_partial.uvh5')
initialize_with_zeros(partial_uvh5, partial_testfile)
# make a mostly empty object in memory to match what we'll write to disk
partial_uvh5.data_array = np.zeros_like(full_uvh5.data_array, dtype=np.complex64)
partial_uvh5.flag_array = np.zeros_like(full_uvh5.flag_array, dtype=np.bool)
partial_uvh5.nsample_array = np.zeros_like(full_uvh5.nsample_array, dtype=np.float32)
# write a single freq to file
freq_inds = np.arange(1)
data = full_uvh5.data_array[:, :, freq_inds, :]
flags = full_uvh5.flag_array[:, :, freq_inds, :]
nsamples = full_uvh5.nsample_array[:, :, freq_inds, :]
partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
freq_chans=freq_inds)
# also write the arrays to the partial object
partial_uvh5.data_array[:, :, freq_inds, :] = data
partial_uvh5.flag_array[:, :, freq_inds, :] = flags
partial_uvh5.nsample_array[:, :, freq_inds, :] = nsamples
# read in the file and make sure it matches
partial_uvh5_file = UVData()
partial_uvh5_file.read(partial_testfile)
nt.assert_equal(partial_uvh5_file, partial_uvh5)
# do it again, with a single polarization
# reinitialize
partial_uvh5 = copy.deepcopy(full_uvh5)
partial_uvh5.data_array = None
partial_uvh5.flag_array = None
partial_uvh5.nsample_array = None
# initialize file on disk
partial_testfile = os.path.join(DATA_PATH, 'test', 'outtest_partial.uvh5')
initialize_with_zeros(partial_uvh5, partial_testfile)
# make a mostly empty object in memory to match what we'll write to disk
partial_uvh5.data_array = np.zeros_like(full_uvh5.data_array, dtype=np.complex64)
partial_uvh5.flag_array = np.zeros_like(full_uvh5.flag_array, dtype=np.bool)
partial_uvh5.nsample_array = np.zeros_like(full_uvh5.nsample_array, dtype=np.float32)
# write a single pol to file
pol_inds = np.arange(1)
data = full_uvh5.data_array[:, :, :, pol_inds]
flags = full_uvh5.flag_array[:, :, :, pol_inds]
nsamples = full_uvh5.nsample_array[:, :, :, pol_inds]
partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples,
polarizations=partial_uvh5.polarization_array[pol_inds])
# also write the arrays to the partial object
partial_uvh5.data_array[:, :, :, pol_inds] = data
partial_uvh5.flag_array[:, :, :, pol_inds] = flags
partial_uvh5.nsample_array[:, :, :, pol_inds] = nsamples
# read in the file and make sure it matches
partial_uvh5_file = UVData()
partial_uvh5_file.read(partial_testfile)
nt.assert_equal(partial_uvh5_file, partial_uvh5)
# test irregularly spaced blts and freqs
# reinitialize
partial_uvh5 = copy.deepcopy(full_uvh5)
partial_uvh5.data_array = None
partial_uvh5.flag_array = None
partial_uvh5.nsample_array = None
# initialize file on disk
partial_testfile = os.path.join(DATA_PATH, 'test', 'outtest_partial.uvh5')
initialize_with_zeros(partial_uvh5, partial_testfile)
# make a mostly empty object in memory to match what we'll write to disk
partial_uvh5.data_array = np.zeros_like(full_uvh5.data_array, dtype=np.complex64)
partial_uvh5.flag_array = np.zeros_like(full_uvh5.flag_array, dtype=np.bool)
partial_uvh5.nsample_array = np.zeros_like(full_uvh5.nsample_array, dtype=np.float32)
# define blts and freqs
blt_inds = [0, 1, 2, 7]
freq_inds = [0, 2, 3, 4]
data_shape = (len(blt_inds), 1, len(freq_inds), full_uvh5.Npols)
data = np.zeros(data_shape, dtype=np.complex64)
flags = np.zeros(data_shape, dtype=np.bool)
nsamples = np.zeros(data_shape, dtype=np.float32)
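    # the buffers passed to write_uvh5_part span the full outer product of the selected blts and freqs, so fill them element by element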
for iblt, blt_idx in enumerate(blt_inds):
for ifreq, freq_idx in enumerate(freq_inds):
data[iblt, :, ifreq, :] = full_uvh5.data_array[blt_idx, :, freq_idx, :]
flags[iblt, :, ifreq, :] = full_uvh5.flag_array[blt_idx, :, freq_idx, :]
nsamples[iblt, :, ifreq, :] = full_uvh5.nsample_array[blt_idx, :, freq_idx, :]
uvtest.checkWarnings(partial_uvh5.write_uvh5_part, [partial_testfile, data, flags, nsamples],
{'blt_inds': blt_inds, 'freq_chans': freq_inds},
message='Selected frequencies are not evenly spaced')
# also write the arrays to the partial object
for iblt, blt_idx in enumerate(blt_inds):
for ifreq, freq_idx in enumerate(freq_inds):
partial_uvh5.data_array[blt_idx, :, freq_idx, :] = data[iblt, :, ifreq, :]
partial_uvh5.flag_array[blt_idx, :, freq_idx, :] = flags[iblt, :, ifreq, :]
partial_uvh5.nsample_array[blt_idx, :, freq_idx, :] = nsamples[iblt, :, ifreq, :]
# read in the file and make sure it matches
partial_uvh5_file = UVData()
partial_uvh5_file.read(partial_testfile)
nt.assert_equal(partial_uvh5_file, partial_uvh5)
# test irregularly spaced freqs and pols
# reinitialize
partial_uvh5 = copy.deepcopy(full_uvh5)
partial_uvh5.data_array = None
partial_uvh5.flag_array = None
partial_uvh5.nsample_array = None
# initialize file on disk
partial_testfile = os.path.join(DATA_PATH, 'test', 'outtest_partial.uvh5')
initialize_with_zeros(partial_uvh5, partial_testfile)
# make a mostly empty object in memory to match what we'll write to disk
partial_uvh5.data_array = np.zeros_like(full_uvh5.data_array, dtype=np.complex64)
partial_uvh5.flag_array = np.zeros_like(full_uvh5.flag_array, dtype=np.bool)
partial_uvh5.nsample_array = np.zeros_like(full_uvh5.nsample_array, dtype=np.float32)
    # define freqs and pols
freq_inds = [0, 1, 2, 7]
pol_inds = [0, 1, 3]
data_shape = (full_uvh5.Nblts, 1, len(freq_inds), len(pol_inds))
data = np.zeros(data_shape, dtype=np.complex64)
flags = np.zeros(data_shape, dtype=np.bool)
nsamples = np.zeros(data_shape, dtype=np.float32)
for ifreq, freq_idx in enumerate(freq_inds):
for ipol, pol_idx in enumerate(pol_inds):
data[:, :, ifreq, ipol] = full_uvh5.data_array[:, :, freq_idx, pol_idx]
flags[:, :, ifreq, ipol] = full_uvh5.flag_array[:, :, freq_idx, pol_idx]
nsamples[:, :, ifreq, ipol] = full_uvh5.nsample_array[:, :, freq_idx, pol_idx]
uvtest.checkWarnings(partial_uvh5.write_uvh5_part, [partial_testfile, data, flags, nsamples],
{'freq_chans': freq_inds, 'polarizations': full_uvh5.polarization_array[pol_inds]},
nwarnings=2, message=['Selected frequencies are not evenly spaced',
'Selected polarization values are not evenly spaced'])
# also write the arrays to the partial object
for ifreq, freq_idx in enumerate(freq_inds):
for ipol, pol_idx in enumerate(pol_inds):
partial_uvh5.data_array[:, :, freq_idx, pol_idx] = data[:, :, ifreq, ipol]
partial_uvh5.flag_array[:, :, freq_idx, pol_idx] = flags[:, :, ifreq, ipol]
partial_uvh5.nsample_array[:, :, freq_idx, pol_idx] = nsamples[:, :, ifreq, ipol]
# read in the file and make sure it matches
partial_uvh5_file = UVData()
partial_uvh5_file.read(partial_testfile)
nt.assert_equal(partial_uvh5_file, partial_uvh5)
# test irregularly spaced blts and pols
# reinitialize
partial_uvh5 = copy.deepcopy(full_uvh5)
partial_uvh5.data_array = None
partial_uvh5.flag_array = None
partial_uvh5.nsample_array = None
# initialize file on disk
partial_testfile = os.path.join(DATA_PATH, 'test', 'outtest_partial.uvh5')
initialize_with_zeros(partial_uvh5, partial_testfile)
# make a mostly empty object in memory to match what we'll write to disk
partial_uvh5.data_array = np.zeros_like(full_uvh5.data_array, dtype=np.complex64)
partial_uvh5.flag_array = np.zeros_like(full_uvh5.flag_array, dtype=np.bool)
partial_uvh5.nsample_array = np.zeros_like(full_uvh5.nsample_array, dtype=np.float32)
    # define blts and pols
blt_inds = [0, 1, 2, 7]
pol_inds = [0, 1, 3]
data_shape = (len(blt_inds), 1, full_uvh5.Nfreqs, len(pol_inds))
data = np.zeros(data_shape, dtype=np.complex64)
    flags = np.zeros(data_shape, dtype=np.bool)
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import cauchy, norm, t
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV
nwl = 10000
fsig = lambda x: (1+0.1*x)**2
sigarr = []
sigarr_expon = []
sigarr_div = []
ijarr = []
### Create a bunch of standard deviations
for ii in range(nwl):
lsig = fsig(ii+1)/nwl
sigarr.append(lsig)
sigarr = np.array(sigarr)
sigarr = np.abs(sigarr)
sigarr_expon = np.array(sigarr_expon)
sigarr_expon = np.abs(sigarr_expon)
sigarr_div = np.array(sigarr_div)
sigarr_div = np.abs(sigarr_div)
ijarr = np.array(ijarr)
### Draw one sample from each of the nwl normal distributions
Zarr = np.zeros(nwl)
for m in range(nwl):
sigrand = sigarr[m]
# sigrand = 10
# sigrand = sigarr_expon[m]
# sigrand = sigarr_div[m]
# sigrand = np.random.choice(sigarr_expon)
Xrand = norm.rvs(loc=0,scale=sigrand,size=1)
Zarr[m] = Xrand
### Fit a Cauchy distribution
loc,sca = cauchy.fit(Zarr)
locnorm, scanorm = norm.fit(Zarr)
dft, loct, scat = t.fit(Zarr)
### Compound distribution
#sigarr[:-1] = sigrand
#weights = 1/sigarr_expon
#weights = weights / np.sum(weights)
weights = np.ones_like(sigarr)
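# compound pdf: an equal-weight mixture of zero-mean normal densities, one per sigma in sigarr (normalised by nwl when evaluated below)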
pdf_cmb = lambda x: np.sum(weights * 1/sigarr * 1/np.sqrt(2*np.pi) * np.exp(-1/2*x**2/sigarr**2))
#pdf_cmb = lambda x: np.sum(weights * 1/sigarr_expon * 1/np.sqrt(2*np.pi) * np.exp(-1/2*x**2/sigarr_expon**2))
#pdf_cmb = lambda x: np.sum(weights * 1/sigarr_div * 1/np.sqrt(2*np.pi) * np.exp(-1/2*x**2/sigarr_div**2))
### Buhlmann
#v2 = np.var(sigarr)
### KDE
print("KDE")
#bandwidths = 10 ** np.linspace(-3, -2, 100)
#grid = GridSearchCV(KernelDensity(kernel='gaussian'),
# {'bandwidth': bandwidths},
# cv=5,
# verbose = 1)
#grid.fit(Zarr[:, None]);
#print('Best params:',grid.best_params_)
#kde = KernelDensity(bandwidth=grid.best_params_['bandwidth'],
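# hand-picked bandwidth; the commented-out grid search above would instead choose it by cross-validation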
kde = KernelDensity(bandwidth=2,
kernel='gaussian')
kde.fit(Zarr[:, None])
### Plots
# Remove large values for ease of plotting
#Zarr = Zarr[(Zarr < 100) & (Zarr > -100)]
x_d = np.linspace(-1000,1000,10000)
cfit = cauchy.pdf(x_d,loc=loc,scale=sca)
nfit = norm.pdf(x_d,loc=locnorm,scale=scanorm)
#tfit = t.pdf(x_d,df=dft,loc=loct,scale=scat)
logprob_kde = kde.score_samples(x_d[:, None])
pdf_cmb_array = []
for x in x_d:
pdf_cmb_array.append(1/nwl * pdf_cmb(x))
# pdf_cmb_array.append(pdf_cmb(x))
pdf_cmb_array = np.array(pdf_cmb_array)
import copy
import numpy as np
class WTFilters(object):
"""Class to generate time series plots of the selected filter data.
Attributes
----------
canvas: MplCanvas
Object of MplCanvas a FigureCanvas
fig: Object
Figure object of the canvas
units: dict
Dictionary of units conversions
beam: object
Axis of figure for number of beams
error: object
Axis of figure for error velocity
vert: object
Axis of figure for vertical velocity
speed: object
Axis of figure for speed time series
snr: object
Axis of figure for snr filters
"""
def __init__(self, canvas):
"""Initialize object using the specified canvas.
Parameters
----------
canvas: MplCanvas
Object of MplCanvas
"""
# Initialize attributes
self.canvas = canvas
self.fig = canvas.fig
self.units = None
self.beam = None
self.error = None
self.vert = None
self.speed = None
self.snr = None
self.hover_connection = None
def create(self, transect, units, selected):
"""Create the axes and lines for the figure.
Parameters
----------
transect: TransectData
Object of TransectData containing boat speeds to be plotted
units: dict
Dictionary of units conversions
selected: str
String identifying the type of plot
"""
# Assign and save parameters
self.units = units
# Clear the plot
self.fig.clear()
# Configure axis
self.fig.ax = self.fig.add_subplot(1, 1, 1)
# Set margins and padding for figure
self.fig.subplots_adjust(left=0.07, bottom=0.2, right=0.99, top=0.98, wspace=0.1, hspace=0)
self.fig.ax.set_xlabel(self.canvas.tr('Ensembles'))
self.fig.ax.grid()
self.fig.ax.xaxis.label.set_fontsize(12)
self.fig.ax.yaxis.label.set_fontsize(12)
self.fig.ax.tick_params(axis='both', direction='in', bottom=True, top=True, left=True, right=True)
ensembles = np.arange(1, len(transect.boat_vel.bt_vel.u_mps) + 1)
ensembles = np.tile(ensembles, (transect.w_vel.valid_data[0, :, :].shape[0], 1))
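        # boolean mask of depth cells above the side-lobe cutoff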
cas = transect.w_vel.cells_above_sl
if selected == 'beam':
# Plot beams
# Determine number of beams for each ensemble
wt_temp = copy.deepcopy(transect.w_vel)
wt_temp.filter_beam(4)
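            # with the 4-beam filter applied, valid_data[5] flags ensembles that have a valid 4-beam solution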
valid_4beam = wt_temp.valid_data[5, :, :].astype(int)
beam_data = np.copy(valid_4beam).astype(int)
beam_data[valid_4beam == 1] = 4
beam_data[wt_temp.valid_data[6, :, :]] = 4
beam_data[valid_4beam == 0] = 3
            # flag data that fail the validity check with a sentinel value
            beam_data[np.logical_not(transect.w_vel.valid_data[1, :, :])] = -999
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
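# sampling configuration: SAMPLES and BATCH drive the image explainer; SAMPLE_IRIS and IRIS_SAMPLES drive the tabular explainer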
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
# Configure widgets to select occlusion colour, segmentation granularity
# and explained class
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# explained class
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
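    # enumerate candidate discretisations: every (lower, upper) pair of petal-length bin edges crossed with every pair of petal-width bin edges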
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&192': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&294': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&295': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&296': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&300': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&301': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&305': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&306': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&307': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&308': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&309': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&310': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&311': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
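    # Entries keyed 'setosa&2&<index>' start below; they follow the same key pattern and
    # hold the same kind of value (a precomputed 2-element np.array) as the 'setosa&1&...'
    # entries above. Values are kept verbatim from the source data.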
'setosa&2&0': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&1': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&2': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&3': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&4': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&5': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&6': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&7': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&8': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&9': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&10': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&11': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&12': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&13': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&14': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&15': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&16': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&17': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&18': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&19': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&20': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&21': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&22': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&23': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&24': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&25': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&26': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&27': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&28': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&29': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&30': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&31': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&32': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&33': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&34': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&35': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&36': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&37': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&38': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&39': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&40': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&41': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&42': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&43': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&44': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&45': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&46': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&50': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&51': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&52': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&53': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&54': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&55': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&56': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&60': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&61': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&65': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&66': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&67': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&68': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&69': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&70': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&71': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&75': np.array([0.0, -0.47562425924289314]),
'setosa&2&76': np.array([0.0, -0.48543689565931186]),
'setosa&2&77': np.array([0.0, -0.7348263896003956]),
'setosa&2&78': np.array([0.0, -0.7920887571493729]),
'setosa&2&79': np.array([0.0, -0.507614207038711]),
'setosa&2&80': np.array([0.0, -0.47562425924289314]),
'setosa&2&81': np.array([0.0, -0.47562425924289314]),
'setosa&2&82': np.array([0.0, -0.47562425924289314]),
'setosa&2&83': np.array([0.0, -0.47562425924289314]),
'setosa&2&84': np.array([0.0, -0.48543689565931186]),
'setosa&2&85': np.array([0.0, -0.48543689565931186]),
'setosa&2&86': np.array([0.0, -0.48543689565931186]),
'setosa&2&87': np.array([0.0, -0.7348263896003956]),
'setosa&2&88': np.array([0.0, -0.7348263896003956]),
'setosa&2&89': np.array([0.0, -0.7920887571493729]),
'setosa&2&90': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&91': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&92': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&93': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&94': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&95': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&96': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&97': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&98': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&99': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&100': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&101': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&102': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&103': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&104': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&105': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&106': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&107': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&108': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&109': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&110': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&111': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&112': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&113': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&114': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&115': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&116': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&117': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&118': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&119': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&120': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&121': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&122': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&123': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&124': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&125': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&126': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&127': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&128': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&129': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&130': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&131': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&132': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&133': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&134': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&136': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&137': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&138': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&139': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&140': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&141': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&142': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&143': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&144': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&145': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&146': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&147': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&148': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&149': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&150': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&151': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&152': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&153': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&154': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&155': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&156': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&157': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&158': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&159': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&160': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&161': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&162': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&163': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&164': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&165': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&166': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&167': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&168': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&169': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&170': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&171': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&172': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&173': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&174': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&175': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&176': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&177': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&178': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&179': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&180': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&181': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&182': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&183': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&184': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&185': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&186': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&187': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&188': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&189': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&190': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&191': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&192': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&193': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&194': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&195': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&196': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&197': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&198': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&199': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&200': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&201': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&202': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&203': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&204': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&205': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&206': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&207': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&208': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&209': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&210': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&211': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&212': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&213': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&214': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&215': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&216': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&217': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&218': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&219': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&220': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&221': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&222': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&223': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&224': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&225': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&226': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&227': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&228': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&229': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&230': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&231': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&232': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&233': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&234': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&235': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&236': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&237': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&238': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&239': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&240': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&241': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&242': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&243': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&244': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&245': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&246': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&247': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&248': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&249': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&250': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&251': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&252': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&253': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&254': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&255': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&256': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&257': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&258': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&259': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&260': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&261': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&262': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&263': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&264': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&265': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&266': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&267': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&268': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&269': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&270': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&271': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&275': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&276': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&277': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&278': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&279': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&280': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&281': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&285': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&286': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&290': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&291': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&292': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&293': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&294': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&295': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&296': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&300': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&301': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&305': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&306': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&307': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&308': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&309': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&310': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&311': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
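    # End of the 'setosa' entries; the 'versicolor&<group>&<index>' entries start below,
    # using the same key format and 2-element np.array values as the blocks above.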
'versicolor&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&2': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&3': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&6': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&7': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&9': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&10': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&12': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&13': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&14': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&17': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&18': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&21': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&22': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&24': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&25': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&27': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&28': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&29': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&32': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&33': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&36': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&37': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&39': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&40': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&42': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&43': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&44': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&45': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&46': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&50': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&51': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&52': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&53': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&54': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&55': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&56': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&66': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&67': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&69': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&70': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&75': np.array([0.0, -0.95124502153736]),
'versicolor&0&76': np.array([0.0, -0.9708703761803881]),
'versicolor&0&77': np.array([0.0, 0.5659706098422994]),
'versicolor&0&78': np.array([0.0, 0.3962828716108186]),
'versicolor&0&79': np.array([0.0, 0.2538069363248767]),
'versicolor&0&80': np.array([0.0, -0.9708703761803881]),
'versicolor&0&81': np.array([0.0, -0.3631376646911367]),
'versicolor&0&82': np.array([0.0, -0.5804857652839247]),
'versicolor&0&83': np.array([0.0, -0.8943993997517804]),
'versicolor&0&84': np.array([0.0, -0.4231275527222919]),
'versicolor&0&85': np.array([0.0, -0.6164235822373675]),
'versicolor&0&86': np.array([0.0, -0.9166476163222441]),
'versicolor&0&87': np.array([0.0, 0.5659706098422994]),
'versicolor&0&88': np.array([0.0, 0.5659706098422994]),
'versicolor&0&89': np.array([0.0, 0.3962828716108186]),
'versicolor&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&92': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&93': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&96': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&97': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&99': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&100': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&102': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&103': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&104': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&107': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&108': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&111': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&112': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&114': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&115': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&117': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&118': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&119': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&120': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&121': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&122': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&123': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&124': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&125': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&126': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&127': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&128': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&129': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&130': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&131': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&132': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&133': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&134': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&137': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&138': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&141': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&142': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&144': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&145': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&147': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&148': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&149': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&152': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&153': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&156': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&157': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&159': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&160': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&162': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&163': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&164': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&167': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&168': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&171': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&172': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&174': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&175': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&177': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&178': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&179': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&180': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&181': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&182': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&183': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&184': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&185': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&186': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&187': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&188': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&189': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&190': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&191': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&192': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&193': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&194': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&197': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&198': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&201': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&202': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&204': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&205': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&207': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&208': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&209': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&212': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&213': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&216': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&217': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&219': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&220': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&222': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&223': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&224': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&225': np.array([-0.04777085826693217, -0.931704979630315]),
'versicolor&0&226': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&227': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&228': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&229': np.array([-0.46216647196120714, 0.35468591243823655]),
'versicolor&0&230': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&231': np.array([-0.3707180757031537, -0.1977196581472426]),
'versicolor&0&232': np.array([-0.1043459833293615, -0.5233314327065356]),
'versicolor&0&233': np.array([-0.049289647556763364, -0.8736084405111605]),
'versicolor&0&234': np.array([-0.34078174031874375, -0.25874482325965437]),
'versicolor&0&235': np.array([-0.050841051273783675, -0.5877587283589205]),
'versicolor&0&236': np.array([-0.0161720977425142, -0.9096817855236822]),
'versicolor&0&237': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&238': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&239': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'versicolor&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&242': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&243': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'versicolor&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&246': np.array([-0.6425009695928476, -0.24851992476830956]),
'versicolor&0&247': np.array([-0.5151243662384031, -0.3255567772442641]),
'versicolor&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'versicolor&0&249': np.array([-0.6300442788906601, -0.28361140069713875]),
'versicolor&0&250': np.array([-0.4875864856121089, -0.3614122096616301]),
'versicolor&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'versicolor&0&252': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&253': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&254': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&257': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&258': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&261': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&262': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&264': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&265': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&267': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&268': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&269': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&270': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&271': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&275': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&276': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&277': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&278': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&279': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&280': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&281': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&285': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&286': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&290': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&291': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&292': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&293': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&294': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&295': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&296': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&300': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&301': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&305': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&306': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&307': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&308': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&309': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&310': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&311': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&1&0': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&1': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&2': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&3': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&4': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&5': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&6': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&7': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&8': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&9': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&10': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&11': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&12': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&13': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&14': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&15': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&16': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&17': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&18': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&19': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&20': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&21': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&22': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&23': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&24': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&25': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&26': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&27': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&28': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&29': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&30': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&31': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&32': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&33': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&34': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&35': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&36': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&37': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&38': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&39': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&40': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&41': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&42': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&43': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&44': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&45': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&46': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&50': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&51': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&52': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&53': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&54': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&55': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&56': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&60': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&61': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&65': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&66': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&67': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&68': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&69': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&70': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&71': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&75': np.array([0.0, 0.4756207622944677]),
'versicolor&1&76': np.array([0.0, 0.4854334805210761]),
'versicolor&1&77': np.array([0.0, 0.16885577975809635]),
'versicolor&1&78': np.array([0.0, 0.395805885538554]),
'versicolor&1&79': np.array([0.0, 0.2538072707138344]),
'versicolor&1&80': np.array([0.0, 0.4854334805210761]),
'versicolor&1&81': np.array([0.0, 0.7613919530844643]),
'versicolor&1&82': np.array([0.0, 0.6668230985485095]),
'versicolor&1&83': np.array([0.0, 0.4904755652105692]),
'versicolor&1&84': np.array([0.0, 0.8121046082359693]),
'versicolor&1&85': np.array([0.0, 0.6855766903749089]),
'versicolor&1&86': np.array([0.0, 0.5008471974438506]),
'versicolor&1&87': np.array([0.0, 0.16885577975809635]),
'versicolor&1&88': np.array([0.0, 0.16885577975809635]),
'versicolor&1&89': np.array([0.0, 0.395805885538554]),
'versicolor&1&90': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&91': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&92': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&93': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&94': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&95': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&96': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&97': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&98': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&99': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&100': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&101': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&102': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&103': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&104': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&105': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&106': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&107': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&108': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&109': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&110': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&111': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&112': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&113': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&114': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&115': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&116': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&117': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&118': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&119': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&120': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&121': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&122': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&123': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&124': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&125': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&126': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&127': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&128': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&129': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&130': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&131': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&132': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&133': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&134': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&135': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&136': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&137': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&138': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&139': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&140': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&141': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&142': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&143': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&144': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&145': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&146': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&147': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&148': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&149': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&150': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&151': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&152': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&153': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&154': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&155': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&156': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&157': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&158': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&159': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&160': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&161': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&162': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&163': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&164': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&165': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&166': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&167': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&168': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&169': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&170': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&171': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&172': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&173': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&174': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&175': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&176': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&177': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&178': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&179': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&180': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&181': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&182': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&183': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&184': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&185': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&186': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&187': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&188': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&189': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&190': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&191': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&192': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&193': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&194': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&195': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&196': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&197': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&198': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&199': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&200': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&201': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&202': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&203': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&204': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&205': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&206': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&207': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&208': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&209': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&210': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&211': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&212': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&213': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&214': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&215': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&216': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&217': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&218': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&219': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&220': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&221': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&222': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&223': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&224': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&225': np.array([0.6253337666017573, 0.21983620140147825]),
'versicolor&1&226': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&227': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&228': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&229': np.array([0.7182033715159247, 0.0970420677941148]),
'versicolor&1&230': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&231': np.array([0.4976586558055923, 0.5393318265947251]),
'versicolor&1&232': np.array([0.4361093214026388, 0.4279491486345008]),
'versicolor&1&233': np.array([0.613985959011319, 0.23148898930908424]),
'versicolor&1&234': np.array([0.46747697713468217, 0.586607956360002]),
'versicolor&1&235': np.array([0.41044950174869577, 0.45415985894965977]),
'versicolor&1&236': np.array([0.6057447478066579, 0.23993389556303918]),
'versicolor&1&237': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&238': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&239': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&240': np.array([0.056623968925773045, 0.43360725859686644]),
'versicolor&1&241': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&242': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&243': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&244': np.array([0.3351719071445682, 0.20616862401308342]),
'versicolor&1&245': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&246': np.array([0.24022705822940116, 0.7185371033867092]),
'versicolor&1&247': np.array([0.010447231513465048, 0.6616528865917504]),
'versicolor&1&248': np.array([0.024556360933646205, 0.4723948285969902]),
'versicolor&1&249': np.array([0.21321406009810842, 0.7648907754638917]),
'versicolor&1&250': np.array([-0.027450681014480036, 0.6999336015080245]),
'versicolor&1&251': np.array([-0.0164329511444131, 0.5132208276383963]),
'versicolor&1&252': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&253': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&254': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&255': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&256': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&257': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&258': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&259': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&260': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&261': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&262': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&263': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&264': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&265': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&266': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&267': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&268': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&269': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&270': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&271': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&275': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&276': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&277': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&278': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&279': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&280': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&281': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&285': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&286': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&290': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&291': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&292': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&293': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&294': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&295': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&296': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&300': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&301': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&305': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&306': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&307': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&308': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&309': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&310': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&311': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&2&0': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&1': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&2': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&3': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&4': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&5': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&6': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&7': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&8': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&9': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&10': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&11': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&12': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&13': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&14': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&15': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&16': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&17': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&18': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&19': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&20': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&21': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&22': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&23': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&24': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&25': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&26': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&27': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&28': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&29': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&30': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&31': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&32': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&33': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&34': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&35': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&36': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&37': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&38': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&39': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&40': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&41': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&42': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&43': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&44': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&45': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&46': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&50': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&51': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&52': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&53': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&54': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&55': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&56': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&60': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&61': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&65': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&66': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&67': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&68': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&69': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&70': np.array([-0.6488228567611906, -0.03186184826812757]),
'versicolor&2&71': np.array([-0.5903420131350324, 0.384224764046184]),
'versicolor&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&75': np.array([0.0, 0.47562425924289314]),
'versicolor&2&76': np.array([0.0, 0.4854368956593117]),
'versicolor&2&77': np.array([0.0, -0.7348263896003956]),
'versicolor&2&78': np.array([0.0, -0.7920887571493729]),
'versicolor&2&79': np.array([0.0, -0.507614207038711]),
'versicolor&2&80': np.array([0.0, 0.4854368956593117]),
'versicolor&2&81': np.array([0.0, -0.3982542883933272]),
'versicolor&2&82': np.array([0.0, -0.08633733326458487]),
'versicolor&2&83': np.array([0.0, 0.4039238345412103]),
'versicolor&2&84': np.array([0.0, -0.38897705551367706]),
'versicolor&2&85': np.array([0.0, -0.06915310813754129]),
'versicolor&2&86': np.array([0.0, 0.41580041887839214]),
'versicolor&2&87': np.array([0.0, -0.7348263896003956]),
'versicolor&2&88': np.array([0.0, -0.7348263896003956]),
'versicolor&2&89': np.array([0.0, -0.7920887571493729]),
'versicolor&2&90': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&91': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&92': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&93': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&94': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&95': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&96': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&97': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&98': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&99': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&100': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&101': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&102': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&103': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&104': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&105': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&106': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&107': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&108': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&109': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&110': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&111': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&112': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&113': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&114': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&115': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&116': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&117': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&118': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&119': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&120': np.array([-0.7638917827493686, 0.868015757634957]),
'versicolor&2&121': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&122': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&123': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&124': np.array([-0.32941440381886555, -0.4173178729969913]),
'versicolor&2&125': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&126': np.array([-0.18291442454393395, -0.2654898014002494]),
'versicolor&2&127': np.array([-0.5797728557269727, 0.3163189837954924]),
'versicolor&2&128': np.array([-0.7579323596667402, 0.8054136823046655]),
'versicolor&2&129': np.array([-0.1948624323669993, -0.23753953755286383]),
'versicolor&2&130': np.array([-0.6437698977881832, 0.3909540110317858]),
'versicolor&2&131': np.array([-0.7963046521980063, 0.846536369471985]),
'versicolor&2&132': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&133': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&134': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&135': np.array([-0.3219660907491514, 0.7482043503408669]),
'versicolor&2&136': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&137': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&138': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&139': np.array([0.2626914501948546, -0.5596191134224637]),
'versicolor&2&140': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&141': np.array([0.4734444929420575, -0.6150974537943872]),
'versicolor&2&142': np.array([0.5369392542176313, -0.430867927332838]),
'versicolor&2&143': np.array([-0.19892251970509112, 0.5718543863753405]),
'versicolor&2&144': np.array([0.5071047612208237, -0.6507546896558788]),
'versicolor&2&145': np.array([0.5629877361048359, -0.4485515113017818]),
'versicolor&2&146': np.array([-0.3047657227470458, 0.6788631774846587]),
'versicolor&2&147': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&148': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&149': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&150': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&151': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&152': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&153': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&154': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&155': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&156': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&157': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&158': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&159': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&160': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&161': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&162': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&163': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&164': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&165': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&166': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&167': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&168': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&169': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&170': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&171': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&172': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&173': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&174': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&175': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&176': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&177': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&178': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&179': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&180': np.array([-0.7638917827493686, 0.868015757634957]),
'versicolor&2&181': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&182': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&183': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&184': np.array([-0.32941440381886555, -0.4173178729969913]),
'versicolor&2&185': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&186': np.array([-0.18291442454393395, -0.2654898014002494]),
'versicolor&2&187': np.array([-0.5797728557269727, 0.3163189837954924]),
'versicolor&2&188': np.array([-0.7579323596667402, 0.8054136823046655]),
'versicolor&2&189': np.array([-0.1948624323669993, -0.23753953755286383]),
'versicolor&2&190': np.array([-0.6437698977881832, 0.3909540110317858]),
'versicolor&2&191': np.array([-0.7963046521980063, 0.846536369471985]),
'versicolor&2&192': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&193': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&194': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&195': np.array([-0.3219660907491514, 0.7482043503408669]),
'versicolor&2&196': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&197': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&198': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&199': np.array([0.2626914501948546, -0.5596191134224637]),
'versicolor&2&200': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&201': np.array([0.4734444929420575, -0.6150974537943872]),
'versicolor&2&202': np.array([0.5369392542176313, -0.430867927332838]),
'versicolor&2&203': np.array([-0.19892251970509112, 0.5718543863753405]),
'versicolor&2&204': np.array([0.5071047612208237, -0.6507546896558788]),
'versicolor&2&205': np.array([0.5629877361048359, -0.4485515113017818]),
'versicolor&2&206': np.array([-0.3047657227470458, 0.6788631774846587]),
'versicolor&2&207': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&208': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&209': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&210': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&211': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&212': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&213': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&214': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&215': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&216': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&217': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&218': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&219': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&220': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&221': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&222': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&223': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&224': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&225': np.array([-0.5775629083348267, 0.7118687782288384]),
'versicolor&2&226': np.array([-0.6016445709024666, 0.7366089009875252]),
'versicolor&2&227': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&228': np.array([-0.0917622729715107, -0.7645776302158537]),
'versicolor&2&229': np.array([-0.25603689955471853, -0.451727980232351]),
'versicolor&2&230': np.array([-0.6016445709024666, 0.7366089009875252]),
'versicolor&2&231': np.array([-0.1269405801024398, -0.34161216844748166]),
'versicolor&2&232': np.array([-0.33176333807327857, 0.09538228407203546]),
'versicolor&2&233': np.array([-0.564696311454556, 0.6421194512020755]),
'versicolor&2&234': np.array([-0.12669523681593967, -0.32786313310034665]),
'versicolor&2&235': np.array([-0.35960845047491363, 0.1335988694092619]),
'versicolor&2&236': np.array([-0.589572650064144, 0.6697478899606418]),
'versicolor&2&237': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&238': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&239': np.array([-0.0917622729715107, -0.7645776302158537]),
'versicolor&2&240': np.array([0.05667262840030629, 0.4335746514880877]),
'versicolor&2&241': np.array([0.0202211257171063, 0.470123810164804]),
'versicolor&2&242': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&243': np.array([0.22461127196921116, -0.7375780139111495]),
'versicolor&2&244': np.array([0.3463149754241171, -0.5568366400939154]),
'versicolor&2&245': np.array([0.0202211257171063, 0.470123810164804]),
'versicolor&2&246': np.array([0.4022739113634462, -0.4700171786183992]),
'versicolor&2&247': np.array([0.5046771347249378, -0.33609610934748635]),
'versicolor&2&248': np.array([0.1370187510624256, 0.30303755274337163]),
'versicolor&2&249': np.array([0.41683021879255133, -0.4812793747667524]),
'versicolor&2&250': np.array([0.5150371666265885, -0.33852139184639396]),
'versicolor&2&251': np.array([0.10611499646955676, 0.33589829339460586]),
'versicolor&2&252': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&253': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&254': np.array([0.22461127196921116, -0.7375780139111495]),
'versicolor&2&255': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&256': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&257': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&258': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&259': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&260': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&261': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&262': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&263': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&264': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&265': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&266': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&267': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&268': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&269': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&270': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&271': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&275': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&276': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&277': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&278': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&279': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&280': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&281': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&285': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&286': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&290': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&291': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&292': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&293': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&294': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&295': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&296': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&300': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&301': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&305': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&306': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&307': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&308': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&309': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&310': np.array([-0.6488228567611906, -0.03186184826812757]),
'versicolor&2&311': np.array([-0.5903420131350324, 0.384224764046184]),
'versicolor&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
'virginica&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&2': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&3': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&6': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&7': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&9': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&10': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&12': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&13': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&14': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&17': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&18': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&21': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&22': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&24': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&25': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&27': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&28': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&29': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&32': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&33': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&36': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&37': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&39': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&40': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&42': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&43': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&44': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&45': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&46': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&47': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&48': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&49': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&50': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&51': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&52': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&53': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&54': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&55': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&56': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&57': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&58': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&59': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'virginica&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&62': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&63': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'virginica&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&66': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&67': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'virginica&0&69': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&70': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'virginica&0&72': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&73': np.array([0.11200181312407695, -0.5330612470996793]),
'virginica&0&74': np.array([0.19998284600732558, -0.3489062419702088]),
'virginica&0&75': np.array([0.0, -0.95124502153736]),
'virginica&0&76': np.array([0.0, -0.9708703761803881]),
'virginica&0&77': np.array([0.0, -0.5659706098422994]),
'virginica&0&78': np.array([0.0, -0.3962828716108186]),
'virginica&0&79': np.array([0.0, 0.2538069363248767]),
'virginica&0&80': np.array([0.0, -0.9708703761803881]),
'virginica&0&81': np.array([0.0, -0.5659706098422994]),
'virginica&0&82': np.array([0.0, -0.3962828716108186]),
'virginica&0&83': np.array([0.0, -0.8943993997517804]),
'virginica&0&84': np.array([0.0, -0.5659706098422994]),
'virginica&0&85': np.array([0.0, -0.3962828716108186]),
'virginica&0&86': np.array([0.0, -0.9166476163222441]),
'virginica&0&87': np.array([0.0, -0.3962828716108186]),
'virginica&0&88': np.array([0.0, -0.5466925844560601]),
'virginica&0&89': np.array([0.0, -0.38529908946531777]),
'virginica&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&92': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&93': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&96': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&97': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&99': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&100': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&102': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&103': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&104': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&107': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&108': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&111': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&112': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&114': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&115': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&117': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&118': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&119': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&120': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&121': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&122': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&123': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&124': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&125': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&126': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&127': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&128': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&129': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&130': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&131': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&132': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&133': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&134': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'virginica&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&137': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&138': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'virginica&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&141': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&142': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'virginica&0&144': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&145': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'virginica&0&147': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&148': np.array([-0.6898990333725056, -0.2534947697713122]),
'virginica&0&149': np.array([-0.769491694075929, -0.22884642137519118]),
'virginica&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&152': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&153': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&156': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&157': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&159': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&160': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&162': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&163': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&164': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&167': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&168': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&171': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&172': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&174': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&175': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&177': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&178': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&179': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&180': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&181': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&182': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&183': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&184': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&185': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&186': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&187': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&188': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&189': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&190': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&191': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&192': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&193': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&194': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'virginica&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&197': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&198': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'virginica&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&201': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&202': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'virginica&0&204': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&205': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'virginica&0&207': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&208': np.array([-0.6898990333725056, -0.2534947697713122]),
'virginica&0&209': np.array([-0.769491694075929, -0.22884642137519118]),
'virginica&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&212': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&213': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&216': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&217': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&219': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&220': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&222': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&223': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&224': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&225': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&226': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&227': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&228': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&229': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&230': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&231': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&232': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&233': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&234': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&235': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&236': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&237': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&238': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&239': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'virginica&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'virginica&0&242': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&243': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'virginica&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'virginica&0&246': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&247': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'virginica&0&249': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&250': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'virginica&0&252': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&253': np.array([-0.544626974647221, -0.24972982107967573]),
'virginica&0&254': np.array([-0.6426355680762406, -0.20016519137103667]),
'virginica&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&257': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&258': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&261': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&262': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&264': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&265': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&267': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&268': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&269': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&270': np.array([-0.04201361383207032, -0.9372571358382161]),
'virginica&0&271': np.array([-0.014237661899709955, -0.9660323357290304]),
'virginica&0&272': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&273': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&274': np.array([-0.4167677904879879, 0.22207334821665425]),
'virginica&0&275': np.array([-0.014237661899709955, -0.9660323357290304]),
'virginica&0&276': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&277': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&278': np.array([-0.07857689135903215, -0.8696882596532965]),
'virginica&0&279': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&280': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&281': np.array([-0.05160969201296555, -0.9000166344885441]),
'virginica&0&282': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&283': np.array([-0.0766197045034485, -0.5080325256323984]),
'virginica&0&284': np.array([-0.32767091750230254, -0.19689316772421933]),
'virginica&0&285': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&286': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&287': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&288': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&289': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&290': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&291': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&292': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&293': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&294': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&295': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&296': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&297': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&298': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&299': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&300': np.array([0.029402442458921384, -0.9481684282717414]),
'virginica&0&301': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&302': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&303': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'virginica&0&305': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&306': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&307': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&308': np.array([0.19002455311770447, -0.8848597943731074]),
'virginica&0&309': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&310': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&311': np.array([0.1746467870122951, -0.9073062742839755]),
'virginica&0&312': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&313': np.array([0.11200181312407695, -0.5330612470996793]),
'virginica&0&314': np.array([0.19998284600732558, -0.3489062419702088]),
'virginica&1&0': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&1': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&2': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&3': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&4': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&5': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&6': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&7': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&8': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&9': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&10': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&11': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&12': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&13': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&14': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&15': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&16': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&17': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&18': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&19': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&20': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&21': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&22': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&23': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&24': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&25': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&26': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&27': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&28': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&29': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&30': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&31': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&32': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&33': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&34': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&35': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&36': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&37': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&38': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&39': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&40': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&41': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&42': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&43': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&44': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&45': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&46': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&47': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&48': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&49': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&50': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&51': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&52': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&53': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&54': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&55': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&56': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&57': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&58': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&59': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&60': np.array([0.4933316375690332, 0.5272416708629276]),
'virginica&1&61': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&62': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&63': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'virginica&1&65': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&66': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&67': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&68': np.array([0.40694846236352233, 0.5109051764198169]),
'virginica&1&69': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&70': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&71': np.array([0.415695226122737, 0.5230815102377903]),
'virginica&1&72': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&73': np.array([0.28313251310829024, -0.10978015869508362]),
'virginica&1&74': np.array([0.20013484983664692, -0.3483612449300506]),
'virginica&1&75': np.array([0.0, 0.4756207622944677]),
'virginica&1&76': np.array([0.0, 0.4854334805210761]),
'virginica&1&77': np.array([0.0, -0.16885577975809632]),
'virginica&1&78': np.array([0.0, -0.39580588553855395]),
'virginica&1&79': np.array([0.0, 0.2538072707138344]),
'virginica&1&80': np.array([0.0, 0.4854334805210761]),
'virginica&1&81': np.array([0.0, -0.16885577975809632]),
'virginica&1&82': np.array([0.0, -0.39580588553855395]),
'virginica&1&83': np.array([0.0, 0.4904755652105692]),
'virginica&1&84': np.array([0.0, -0.16885577975809632]),
'virginica&1&85': np.array([0.0, -0.39580588553855395]),
'virginica&1&86': np.array([0.0, 0.5008471974438506]),
'virginica&1&87': np.array([0.0, -0.39580588553855395]),
'virginica&1&88': np.array([0.0, -0.14423919730424817]),
'virginica&1&89': np.array([0.0, -0.3847817540585927]),
'virginica&1&90': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&91': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&92': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&93': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&94': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&95': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&96': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&97': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&98': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&99': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&100': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&101': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&102': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&103': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&104': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&105': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&106': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&107': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&108': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&109': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&110': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&111': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&112': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&113': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&114': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&115': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&116': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&117': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&118': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&119': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&120': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&121': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&122': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&123': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&124': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&125': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&126': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&127': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&128': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&129': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&130': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&131': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&132': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&133': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&134': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&135': np.array([0.5188109114552927, 0.03638964581864269]),
'virginica&1&136': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&137': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&138': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&139': np.array([0.5436097000280874, 0.1461891067488832]),
'virginica&1&140': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&141': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&142': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&143': np.array([0.4788153032824012, 0.08625929936974323]),
'virginica&1&144': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&145': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&146': np.array([0.46583127837967303, 0.09875847161509169]),
'virginica&1&147': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&148': np.array([0.7419884013108898, -0.4595742931114029]),
'virginica&1&149': np.array([0.6092194175719845, -0.5086479426935605]),
'virginica&1&150': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&151': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&152': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&153': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&154': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&155': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&156': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&157': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&158': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&159': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&160': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&161': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&162': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&163': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&164': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&165': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&166': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&167': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&168': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&169': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&170': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&171': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&172': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&173': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&174': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&175': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&176': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&177': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&178': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&179': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&180': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&181': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&182': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&183': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&184': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&185': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&186': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&187': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&188': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&189': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&190': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&191': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&192': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&193': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&194': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&195': np.array([0.5188109114552927, 0.03638964581864269]),
'virginica&1&196': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&197': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&198': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&199': np.array([0.5436097000280874, 0.1461891067488832]),
'virginica&1&200': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&201': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&202': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&203': np.array([0.4788153032824012, 0.08625929936974323]),
'virginica&1&204': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&205': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&206': np.array([0.46583127837967303, 0.09875847161509169]),
'virginica&1&207': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&208': np.array([0.7419884013108898, -0.4595742931114029]),
'virginica&1&209': np.array([0.6092194175719845, -0.5086479426935605]),
'virginica&1&210': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&211': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&212': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&213': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&214': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&215': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&216': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&217': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&218': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&219': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&220': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&221': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&222': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&223': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&224': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&225': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&226': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&227': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&228': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&229': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&230': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&231': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&232': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&233': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&234': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&235': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&236': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&237': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&238': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&239': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&240': np.array([0.056623968925773045, 0.43360725859686644]),
'virginica&1&241': np.array([0.020169511418752378, 0.47015948158260334]),
'virginica&1&242': np.array([0.5806365328450952, -0.4726270680771261]),
'virginica&1&243': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&244': np.array([0.3351719071445682, 0.20616862401308342]),
'virginica&1&245': np.array([0.020169511418752378, 0.47015948158260334]),
'virginica&1&246': np.array([0.5806365328450952, -0.4726270680771261]),
'virginica&1&247': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&248': np.array([0.024556360933646205, 0.4723948285969902]),
'virginica&1&249': np.array([0.5806365328450952, -0.4726270680771261]),
'virginica&1&250': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&251': np.array([-0.0164329511444131, 0.5132208276383963]),
'virginica&1&252': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&253': np.array([0.581569928198426, -0.46134543884925855]),
'virginica&1&254': np.array([0.42361197252581306, -0.5068181610814407]),
'virginica&1&255': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&256': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&257': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&258': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&259': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&260': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&261': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&262': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&263': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&264': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&265': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&266': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&267': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&268': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&269': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&270': np.array([-0.6288817118959938, 0.6849987400957501]),
'virginica&1&271': np.array([-0.6491819158994796, 0.7060292771859485]),
'virginica&1&272': np.array([-0.36354251586275393, 0.01503732165107865]),
'virginica&1&273': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&274': np.array([-0.3507937472799825, 0.22709708691079003]),
'virginica&1&275': np.array([-0.6491819158994796, 0.7060292771859485]),
'virginica&1&276': np.array([-0.36354251586275393, 0.01503732165107865]),
'virginica&1&277': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&278': np.array([-0.6219129029345898, 0.6860569455333333]),
'virginica&1&279': np.array([-0.36354251586275393, 0.01503732165107865]),
'virginica&1&280': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&281': np.array([-0.6423063482710314, 0.7078274136226649]),
'virginica&1&282': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&283': np.array([-0.38798262782075055, 0.05152547330256509]),
'virginica&1&284': np.array([-0.23804537254556749, -0.24790919248823104]),
'virginica&1&285': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&286': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&287': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&288': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&289': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&290': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&291': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&292': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&293': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&294': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&295': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&296': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&297': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&298': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&299': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&300': np.array([0.4933316375690332, 0.5272416708629276]),
'virginica&1&301': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&302': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&303': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'virginica&1&305': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&306': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&307': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&308': np.array([0.40694846236352233, 0.5109051764198169]),
'virginica&1&309': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&310': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&311': np.array([0.415695226122737, 0.5230815102377903]),
'virginica&1&312': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&313': np.array([0.28313251310829024, -0.10978015869508362]),
'virginica&1&314': np.array([0.20013484983664692, -0.3483612449300506]),
'virginica&2&0': np.array([0.37157691321004915, 0.12216227283618836]),
'virginica&2&1': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&2': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&3': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&4': np.array([0.4741571944522723, -0.3872697414416878]),
'virginica&2&5': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&6': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&7': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&8': np.array([0.6273836195848199, -0.15720981251964872]),
'virginica&2&9': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&10': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&11': np.array([0.6863652799597699, -0.21335694415409426]),
'virginica&2&12': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&13': np.array([0.11274898124253621, 0.6292927079496371]),
'virginica&2&14': np.array([0.32240464148521225, 0.645858545382009]),
'virginica&2&15': np.array([0.37157691321004915, 0.12216227283618836]),
'virginica&2&16': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&17': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&18': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&19': np.array([0.4741571944522723, -0.3872697414416878]),
'virginica&2&20': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&21': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&22': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&23': np.array([0.6273836195848199, -0.15720981251964872]),
'virginica&2&24': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&25': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&26': np.array([0.6863652799597699, -0.21335694415409426]),
'virginica&2&27': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&28': np.array([0.11274898124253621, 0.6292927079496371]),
'virginica&2&29': np.array([0.32240464148521225, 0.645858545382009]),
'virginica&2&30': np.array([0.5188517506916897, 0.036358567813067386]),
'virginica&2&31': np.array([0.5131939273945454, 0.04199748266790813]),
'virginica&2&32': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&33': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&34': np.array([0.5354807894355184, -0.3418054346754283]),
'virginica&2&35': np.array([0.5131939273945454, 0.04199748266790813]),
'virginica&2&36': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&37': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&38': np.array([0.5917672401610737, -0.061499563231173816]),
'virginica&2&39': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&40': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&41': np.array([0.5967658480721675, -0.06546963852548916]),
'virginica&2&42': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&43': np.array([0.15466782862660866, 0.5877736906472755]),
'virginica&2&44': np.array([0.37833006296225374, 0.5922410451071548]),
'virginica&2&45': np.array([0.8252668830593566, 0.11450866713130668]),
'virginica&2&46': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&47': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&48': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&49': np.array([0.8735738195653328, -0.046438180466149094]),
'virginica&2&50': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&51': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&52': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&53': np.array([0.8388485924434891, 0.09800790238640067]),
'virginica&2&54': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&55': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&56': np.array([0.835455914569297, 0.10189258327760495]),
'virginica&2&57': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&58': np.array([0.6958244586699014, 0.2551528503043789]),
'virginica&2&59': np.array([0.7857855057542923, 0.17526869720012267]),
'virginica&2&60': np.array([-0.5227340800279543, 0.4209267574088147]),
'virginica&2&61': np.array([-0.5140708637198534, 0.4305361238057349]),
'virginica&2&62': np.array([-0.2661726847443776, 0.6902916602462779]),
'virginica&2&63': np.array([-0.2741128763380603, 0.7260889090887469]),
    'virginica&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
from __future__ import print_function
try:
import h5py
WITH_H5PY = True
except ImportError:
WITH_H5PY = False
try:
import zarr
WITH_ZARR = True
from .io import IoZarr
except ImportError:
WITH_ZARR = False
try:
import z5py
WITH_Z5PY = True
from .io import IoN5
except ImportError:
WITH_Z5PY = False
import os
import json
from random import shuffle
import numpy as np
import re
import fnmatch
from .inference import load_input_crop
import dask
import toolz as tz
import logging
def _offset_list(shape, output_shape):
in_list = []
for z in np.arange(0, shape[0], output_shape[0]):
for y in np.arange(0, shape[1], output_shape[1]):
for x in np.arange(0, shape[2], output_shape[2]):
in_list.append([float(z), float(y), float(x)])
return in_list
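# Illustrative example of the grid produced above (values are hypothetical):
# _offset_list((4, 4, 4), (2, 2, 2)) yields the eight block origins
# [0., 0., 0.], [0., 0., 2.], [0., 2., 0.], ..., [2., 2., 2.].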
# NOTE this will not cover the whole volume
def _offset_list_with_shift(shape, output_shape, shift):
in_list = []
for z in np.arange(0, shape[0], output_shape[0]):
for y in np.arange(0, shape[1], output_shape[1]):
for x in np.arange(0, shape[2], output_shape[2]):
in_list.append([min(float(z) + shift[0], shape[0]),
min(float(y) + shift[1], shape[1]),
min(float(x) + shift[2], shape[2])])
return in_list
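# Worked example of the caveat noted above (hypothetical numbers): for shape=(100,)*3,
# output_shape=(40,)*3 and shift=(20,)*3, the z-offsets become 20, 60 and 100 (clamped),
# so the region [0, 20) is never the start of a block and stays uncovered.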
# this returns the offsets for the given output blocks.
# blocks are padded on the fly during inference if necessary
def get_offset_lists(shape,
gpu_list,
save_folder,
output_shape,
randomize=False,
shift=None):
in_list = _offset_list(shape, output_shape) if shift is None else\
_offset_list_with_shift(shape, output_shape, shift)
if randomize:
shuffle(in_list)
n_splits = len(gpu_list)
out_list = [in_list[i::n_splits] for i in range(n_splits)]
if not os.path.exists(save_folder):
os.mkdir(save_folder)
for ii, olist in enumerate(out_list):
list_name = os.path.join(save_folder, 'list_gpu_%i.json' % gpu_list[ii])
with open(list_name, 'w') as f:
json.dump(olist, f)
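# Minimal usage sketch (hypothetical paths and sizes): tiling a (200, 200, 200) volume into
# (100, 100, 100) output blocks for two GPUs writes './offsets/list_gpu_0.json' and
# './offsets/list_gpu_1.json', each holding four of the eight block offsets:
#   get_offset_lists((200, 200, 200), [0, 1], './offsets', (100, 100, 100))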
# this returns the offsets for the given output blocks and bounding box.
# blocks are padded on the fly during inference if necessary
def get_offset_lists_with_bb(shape,
gpu_list,
save_folder,
output_shape,
bb_start,
bb_stop,
randomize=False):
# zap the bounding box to grid defined by out_blocks
bb_start_c = [(bbs // outs) * outs for bbs, outs in zip(bb_start, output_shape)]
bb_stop_c = [(bbs // outs + 1) * outs for bbs, outs in zip(bb_stop, output_shape)]
in_list = []
for z in range(bb_start_c[0], bb_stop_c[0], output_shape[0]):
for y in range(bb_start_c[1], bb_stop_c[1], output_shape[1]):
for x in range(bb_start_c[2], bb_stop_c[2], output_shape[2]):
in_list.append([z, y, x])
if randomize:
shuffle(in_list)
n_splits = len(gpu_list)
out_list = [in_list[i::n_splits] for i in range(n_splits)]
if not os.path.exists(save_folder):
os.mkdir(save_folder)
for ii, olist in enumerate(out_list):
list_name = os.path.join(save_folder, 'list_gpu_%i.json' % gpu_list[ii])
with open(list_name, 'w') as f:
json.dump(olist, f)
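# Worked example of the grid snapping above (hypothetical numbers): with
# output_shape=(50, 50, 50), bb_start=(60, 0, 0) and bb_stop=(140, 10, 10), the snapped
# bounds are bb_start_c=(50, 0, 0) and bb_stop_c=(150, 50, 50), so offsets are generated
# for z in {50, 100}, y in {0} and x in {0}.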
# redistributing offset lists from failed jobs
def redistribute_offset_lists(gpu_list, save_folder):
    p_full = re.compile(r"list_gpu_\d+\.json")
    p_proc = re.compile(r"list_gpu_\d+_\S*_processed\.txt")
full_list_jsons = []
processed_list_files = []
for f in os.listdir(save_folder):
mo_full = p_full.match(f)
mo_proc = p_proc.match(f)
if mo_full is not None:
full_list_jsons.append(f)
if mo_proc is not None:
processed_list_files.append(f)
full_block_list = set()
for fl in full_list_jsons:
with open(os.path.join(save_folder, fl), 'r') as f:
bl = json.load(f)
full_block_list.update({tuple(coo) for coo in bl})
processed_block_list = set()
bls = []
for pl in processed_list_files:
with open(os.path.join(save_folder, pl), 'r') as f:
bl_txt = f.read()
bl_txt = '[' + bl_txt[:bl_txt.rfind(']') + 1] + ']'
bls.append(json.loads(bl_txt))
processed_block_list.update({tuple(coo) for coo in bls[-1]})
to_be_processed_block_list = list(full_block_list - processed_block_list)
previous_tries = []
    p_tries = re.compile(r"list_gpu_\d+_try\d+\.json")
for f in os.listdir(save_folder):
mo_tries = p_tries.match(f)
if mo_tries is not None:
previous_tries.append(f)
if len(previous_tries) == 0:
tryno = 0
else:
trynos = []
for tr in previous_tries:
trynos.append(int(tr.split('try')[1].split('.json')[0]))
tryno = max(trynos)+1
print('Backing up last try ({0:})'.format(tryno))
    for f in full_list_jsons:
        os.rename(os.path.join(save_folder, f), os.path.join(save_folder, f[:-5] + '_try{0:}.json'.format(tryno)))
    for f in processed_list_files:
        os.rename(os.path.join(save_folder, f), os.path.join(save_folder, f[:-4] + '_try{0:}.txt'.format(tryno)))
n_splits = len(gpu_list)
out_list = [to_be_processed_block_list[i::n_splits] for i in range(n_splits)]
for ii, olist in enumerate(out_list):
if len(olist) > 0:
list_name = os.path.join(save_folder, 'list_gpu_%i.json' % gpu_list[ii])
with open(list_name, 'w') as f:
json.dump(olist, f)
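# Expected folder layout for the redistribution above (file names follow the regexes used in
# the function; host names are examples): 'list_gpu_0.json' holds a GPU's full block list and
# 'list_gpu_0_<host>_processed.txt' its progress log. After a crashed run, calling
# redistribute_offset_lists([0, 1], save_folder) backs the old files up as '*_try<n>.json'/'.txt'
# and writes fresh 'list_gpu_<i>.json' files containing only the still-unprocessed blocks.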
def load_ds(path, key):
    ext = os.path.splitext(path)[-1]
    if ext.lower() in ('.h5', '.hdf', '.hdf5'):
        assert WITH_H5PY
        # keep the file handle alive; closing it would invalidate the returned dataset
        f = h5py.File(path, 'r')
        ds = f[key]
    elif ext.lower() in ('.zr', '.zarr', '.n5'):
        assert WITH_Z5PY or WITH_ZARR
        if WITH_ZARR:
            f = zarr.open(path)
            ds = f[key]
        elif WITH_Z5PY:
            f = z5py.File(path)
            ds = f[key]
    else:
        raise ValueError("Unsupported file extension: {0:}".format(ext))
    return ds
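# Usage sketch (hypothetical file and dataset key): returns an open dataset handle without
# reading it into memory, e.g.
#   raw = load_ds('/path/to/volume.h5', 'volumes/raw')
#   print(raw.shape)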
def generate_list_for_mask(offset_file_json, output_shape_wc, path, mask_ds, n_cpus, mask_voxel_size=None):
mask = load_ds(path, mask_ds)
if mask_voxel_size is None:
if "pixelResolution" in mask.attrs:
mask_voxel_size = mask.attrs["pixelResolution"]["dimensions"]
elif "resolution" in mask.attrs:
mask_voxel_size = mask.attrs["resolution"]
else:
mask_voxel_size = (1,) * len(output_shape_wc)
logging.warning("Did not find resolution information in attributes, defaulting to {0:}".format(mask_voxel_size))
    shape_wc = tuple(np.array(mask.shape) * np.array(mask_voxel_size))
# Copyright 2018 Uber Technologies, Inc. All Rights Reserved.
# Modifications copyright (C) 2019 Intel Corporation
# Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from distutils.version import LooseVersion
import inspect
import itertools
import os
import platform
import sys
import unittest
import warnings
import time
import json
from collections.abc import Iterable
import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import horovod.torch as hvd
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'utils'))
from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath
_1_5_api = LooseVersion(torch.__version__) >= LooseVersion('1.5.0')
ccl_supported_types = set([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor,
torch.DoubleTensor])
# Set environment variable for dynamic timeline API test
os.environ["HOROVOD_TIMELINE"] = "DYNAMIC"
class TorchTests(unittest.TestCase):
"""
Tests for ops in horovod.torch.
"""
def __init__(self, *args, **kwargs):
super(TorchTests, self).__init__(*args, **kwargs)
warnings.simplefilter('module')
def convert_cpu_fp16_to_fp32(self, *values):
# PyTorch doesn't support any CPU ops on FP16 tensors.
# In case we need to do ops, we will convert tensor to FP32 here.
result = []
for value in values:
if value.dtype in [torch.float16, torch.HalfTensor] and not value.is_cuda:
result.append(value.float())
else:
result.append(value)
return result
def cast_and_place(self, tensor, dtype):
if dtype.is_cuda:
return tensor.cuda(hvd.local_rank()).type(dtype)
return tensor.type(dtype)
def filter_supported_types(self, types):
if 'CCL_ROOT' in os.environ:
types = [t for t in types if t in ccl_supported_types]
return types
def test_gpu_required(self):
if not torch.cuda.is_available():
skip_or_fail_gpu_test(self, "No GPUs available")
@pytest.mark.skipif(platform.system() == 'Darwin', reason='Reinit not supported on macOS')
def test_horovod_reinit(self):
"""Test that Horovod can init -> shutdown -> init successfully."""
mpi_rank, _ = mpi_env_rank_and_size()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
is_mpi = gloo_rank == -1
if is_mpi:
# Horovod cannot be re-initialized after shutdown when using MPI, so
# this test can only be done using the Gloo controller
self.skipTest("Gloo is not available")
hvd.init()
rank, size = hvd.rank(), hvd.size()
hvd.shutdown()
hvd.init()
rank2, size2 = hvd.rank(), hvd.size()
assert rank == rank2
assert size == size2
def test_horovod_is_initialized(self):
"""Test that is_initialized returned by hvd.is_initialized() is correct."""
hvd.init()
assert hvd.is_initialized()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
is_mpi = gloo_rank == -1
if is_mpi:
# Only applies for Gloo
self.skipTest("Gloo is not available")
hvd.shutdown()
assert not hvd.is_initialized()
hvd.init()
def test_horovod_rank(self):
"""Test that the rank returned by hvd.rank() is correct."""
mpi_rank, _ = mpi_env_rank_and_size()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
# The mpi rank does not match gloo rank, we need to figure which one
# we are using to run the test.
is_mpi = gloo_rank == -1
hvd.init()
rank = hvd.rank()
if is_mpi:
assert mpi_rank == rank
else:
assert gloo_rank == rank
def test_horovod_size(self):
"""Test that the size returned by hvd.size() is correct."""
_, mpi_size = mpi_env_rank_and_size()
gloo_size = int(os.getenv('HOROVOD_SIZE', -1))
# The mpi size does not match gloo size, we need to figure which one
# we are using to run the test.
is_mpi = gloo_size == -1
hvd.init()
size = hvd.size()
if is_mpi:
assert mpi_size == size
else:
assert gloo_size == size
def test_horovod_allreduce(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
summed = hvd.allreduce(tensor, average=False)
tensor, summed = self.convert_cpu_fp16_to_fp32(tensor, summed)
multiplied = tensor * size
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
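            # For example (illustrative note, not part of the upstream assertions): with 4 ranks
            # and float tensors the branches below settle on threshold = 1e-4, while with 15 or
            # more ranks the dtype/dim combination is skipped entirely via the `break`.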
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_average(self):
"""Test that the allreduce correctly averages 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
averaged = hvd.allreduce(tensor, average=True)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(averaged, tensor, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_inplace(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
multiplied = self.cast_and_place(tensor * size, dtype)
tensor = self.cast_and_place(tensor, dtype)
hvd.allreduce_(tensor, average=False)
tensor, multiplied = self.convert_cpu_fp16_to_fp32(tensor, multiplied)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(tensor, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_async_fused(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors
with Tensor Fusion."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
tests = []
is_hvd_poll_false_once = False
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
handle = hvd.allreduce_async(tensor, average=False)
if not hvd.poll(handle):
is_hvd_poll_false_once = True
tensor, = self.convert_cpu_fp16_to_fp32(tensor)
multiplied = tensor * size
tests.append((dtype, multiplied, handle))
# Make sure it's an asynchronous operation.
assert is_hvd_poll_false_once, 'hvd.poll() always returns True, not an async op?'
for dtype, multiplied, handle in tests:
summed = hvd.synchronize(handle)
summed, = self.convert_cpu_fp16_to_fp32(summed)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_multi_gpu(self):
"""Test that the allreduce works on multiple GPUs."""
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
# Skip the test if there are not enough GPUs.
if torch.cuda.device_count() < hvd.local_size() * 2:
self.skipTest("Not enough GPUs available")
iter = 0
dtypes = [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
iter += 1
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
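# Each rank owns two GPUs (local_rank * 2 and local_rank * 2 + 1); alternate
# between them across iterations so both are exercised.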
device = local_rank * 2 + (iter + local_rank) % 2
tensor = tensor.cuda(device).type(dtype)
multiplied = tensor * size
hvd.allreduce_(tensor, average=False)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(tensor, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_prescale(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors with prescaling."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
int_types = [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]
half_types = [torch.HalfTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
np.random.seed(1234)
factor = np.random.uniform()
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
summed = hvd.allreduce(tensor, average=False,
prescale_factor=factor)
factor = torch.tensor(factor, dtype=torch.float64)
factor = factor.cuda(hvd.local_rank()) if dtype.is_cuda else factor
if dtype.is_cuda and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# For integer types, scaling done in FP64
factor = factor.type(torch.float64 if dtype in int_types else dtype)
tensor = tensor.type(torch.float64 if dtype in int_types else dtype)
else:
# For integer types, scaling done in FP64, FP32 math for FP16 on CPU
factor = factor.type(torch.float32 if dtype in half_types else
torch.float64 if dtype in int_types else dtype)
tensor = tensor.type(torch.float32 if dtype in half_types else
torch.float64 if dtype in int_types else dtype)
multiplied = factor * tensor
multiplied = multiplied.type(dtype)
summed, multiplied = self.convert_cpu_fp16_to_fp32(summed, multiplied)
multiplied *= size
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_postscale(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors with postscaling."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
int_types = [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]
half_types = [torch.HalfTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
np.random.seed(1234)
factor = np.random.uniform()
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
summed = hvd.allreduce(tensor, average=False,
postscale_factor=factor)
factor = torch.tensor(factor, dtype=torch.float64)
factor = factor.cuda(hvd.local_rank()) if dtype.is_cuda else factor
if dtype.is_cuda and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# For integer types, scaling done in FP64
factor = factor.type(torch.float64 if dtype in int_types else dtype)
tensor = tensor.type(torch.float64 if dtype in int_types else dtype)
else:
# For integer types, scaling done in FP64, FP32 math for FP16 on CPU
factor = factor.type(torch.float32 if dtype in half_types else
torch.float64 if dtype in int_types else dtype)
tensor = tensor.type(torch.float32 if dtype in half_types else
torch.float64 if dtype in int_types else dtype)
multiplied = size * tensor
multiplied = multiplied * factor
multiplied = multiplied.type(dtype)
summed, multiplied = self.convert_cpu_fp16_to_fp32(summed, multiplied)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different rank or dimension."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same rank, different dimension
torch.manual_seed(1234)
dims = [17 + rank] * 3
tensor = torch.FloatTensor(*dims).random_(-100, 100)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
# Same number of elements, different rank
torch.manual_seed(1234)
if rank == 0:
dims = [17, 23 * 57]
else:
dims = [17, 23, 57]
tensor = torch.FloatTensor(*dims).random_(-100, 100)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allreduce_type_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different type."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same dimensions, different types
dims = [17] * 3
if rank % 2 == 0:
tensor = torch.IntTensor(*dims)
else:
tensor = torch.FloatTensor(*dims)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allreduce_cpu_gpu_error(self):
"""Test that the allreduce raises an error if different ranks try to
perform reduction on CPU and GPU."""
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same dimensions, different devices (CPU vs GPU)
dims = [17] * 3
if rank % 2 == 0:
tensor = torch.cuda.FloatTensor(*dims)
else:
tensor = torch.FloatTensor(*dims)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allreduce_duplicate_name_error(self):
"""Test that the allreduce raises an error if there are
two concurrent operations with the same name."""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dims = [17] * 3
tensor = torch.FloatTensor(*dims)
hvd.allreduce_async(tensor, name='duplicate_name')
try:
for i in range(10):
hvd.allreduce_async(tensor, name='duplicate_name')
assert False, 'hvd.allreduce_async did not throw error'
except (torch.FatalError, ValueError):
pass
def test_horovod_allreduce_grad(self):
"""Test the correctness of the allreduce gradient."""
hvd.init()
size = hvd.size()
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
summed = hvd.allreduce(tensor, average=False)
summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones([17] * dim) * size
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_allreduce_grad_average(self):
"""Test the correctness of the allreduce averaged gradient."""
hvd.init()
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
summed = hvd.allreduce(tensor, average=True)
summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones([17] * dim)
err = np.linalg.norm(expected - grad_out)
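self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))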
import numpy as np
class ConditionNames:
def __init__(self):
pass
REFLECTION_CONDITION = 'reflection'
CYCLE_CONDITION = 'cycle'
ABSORBING_CONDITION = 'absorb'
APPLIED_FORCE_CONDITION = 'applied_force'
class ProblemTypes:
def __init__(self):
pass
ACOUSTIC = 'acoustic'
SEISMIC = 'seismic'
class SolverMethods:
def __init__(self):
pass
BIOCOMPACT = 'bicompact'
LAX_WENDROFF = 'lax_wendroff'
BEAM_WARMING = 'beam_warming'
KIR = 'kir'
TVD = 'tvd'
WENO = 'weno'
MCCORMACK = "McCormack"
cells_for_method = {BIOCOMPACT: (1, 1), LAX_WENDROFF: (3, 3), BEAM_WARMING: (1, 1), KIR: (1, 1), TVD: (1, 1), WENO: (1, 1), MCCORMACK: (2, 2)}
@classmethod
def get_cells_amount_left(cls, method_name):
return SolverMethods.cells_for_method[method_name][0]
@classmethod
def get_cells_amount_right(cls, method_name):
return SolverMethods.cells_for_method[method_name][1]
class Directions:
def __init__(self):
pass
X = 'x'
Y = 'y'
p = 0 # index of pressure in values array
v = 1 # index of velocity (x-component) in values array
u = 2 # index of velocity (y-component) in values array, only for 2d case
def border_condition_1d(grid, type_of_task, border_left, border_right, method_name, time, force_left=0, force_right=0):
"""
Applies border conditions to 'grid' array and returns updated version of it.
Needs to have 'type_of_task' and 'method_name' specified by a string from 'ProblemTypes' class.
Additional arguments:
- 'border_left' - a string from 'ConditionNames' class, specifying type of left border;
- 'border_right' - a string from 'ConditionNames' class, specifying type of right border;
- 'force_left' - applied force at the left border;
- 'force_right' - applied force at the right border
"""
if type_of_task == ProblemTypes.ACOUSTIC:
return border_condition_1d_acoustic(grid, type_of_task, border_left, border_right, method_name, time, force_left,
force_right)
elif type_of_task == ProblemTypes.SEISMIC:
return border_condition_1d_seismic(grid, type_of_task, border_left, border_right, method_name, time, force_left,
force_right)
def border_condition_1d_acoustic(grid, type_of_task, border_left, border_right, method_name, time, force_left=0,
force_right=0):
cells_left = SolverMethods.get_cells_amount_left(method_name)
cells_right = SolverMethods.get_cells_amount_right(method_name)
sizes = [len(grid[0]), len(grid[0][0])]
grid_new = np.zeros((cells_left, sizes[0], sizes[1]))
# Check left border.
if border_left == ConditionNames.REFLECTION_CONDITION:
for i in range(cells_left - 1, -1, -1):
grid_new[i][time][p] = grid[cells_left - 1 - i][time][p]
grid_new[i][time][v] = -grid[cells_left - 1 - i][time][v]
elif border_left == ConditionNames.CYCLE_CONDITION:
for i in range(cells_left - 1, -1, -1):
grid_new[i][time] = grid[len(grid) - cells_left + i][time]
#
elif border_left == ConditionNames.ABSORBING_CONDITION:
for i in range(cells_left - 1, -1, -1):
grid_new[i][time] = np.zeros(grid_new.shape[2])
elif border_left == ConditionNames.APPLIED_FORCE_CONDITION:
for i in range(cells_left - 1, -1, -1):
grid_new[i][time][v] = grid[cells_left - 1 - i][time][v]
grid_new[i][time][p] = 2 * force_left - grid[cells_left - 1 - i][time][p]
ext_grid = np.concatenate((grid_new, grid), axis=0)
grid_new = np.zeros((cells_right, sizes[0], sizes[1]))
# Check right border.
if border_right == ConditionNames.REFLECTION_CONDITION:
for i in range(cells_right - 1, -1, -1):
grid_new[i][time][p] = grid[len(grid) - 1 - i][time][p]
grid_new[i][time][v] = -grid[len(grid) - 1 - i][time][v]
elif border_right == ConditionNames.CYCLE_CONDITION:
for i in range(cells_right - 1, -1, -1):
grid_new[i][time] = grid[i][time]
elif border_right == ConditionNames.ABSORBING_CONDITION:
for i in range(cells_right - 1, -1, -1):
grid_new[i][time] = np.zeros(grid_new.shape[2])
elif border_right == ConditionNames.APPLIED_FORCE_CONDITION:
for i in range(cells_right - 1, -1, -1):
grid_new[i][time][v] = grid[len(grid) - 1 - i][time][v]
grid_new[i][time][p] = 2 * force_right - grid[len(grid) - 1 - i][time][p]
ext_grid = np.concatenate((ext_grid, grid_new), axis=0)
return ext_grid
def border_condition_1d_seismic(arr, type_of_task, border_left, border_right, method_name, time, force_left, force_right):
# for 1d seismic and acoustic conditions are the same
return border_condition_1d_acoustic(arr, type_of_task, border_left, border_right, method_name, time, force_left,
force_right)
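# Hedged usage sketch (not part of the original module): pads a small acoustic
# grid with ghost cells using the routines above. The grid layout
# (cells x time levels x values, with values ordered (p, v)) follows the
# conventions defined at the top of this file; the concrete sizes are illustrative.
def _demo_border_condition_1d():
    grid = np.zeros((5, 1, 2))  # 5 cells, 1 time level, values (p, v)
    grid[:, 0, p] = np.arange(5.0)  # arbitrary pressure profile
    ext = border_condition_1d(grid, ProblemTypes.ACOUSTIC,
                              ConditionNames.REFLECTION_CONDITION,
                              ConditionNames.ABSORBING_CONDITION,
                              SolverMethods.LAX_WENDROFF, time=0)
    return ext.shape  # (11, 1, 2): Lax-Wendroff adds 3 ghost cells per side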
def border_condition_2d_acoustic(grid, border_left, border_right, method_name, time, direction=Directions.X,
force_left=0, force_right=0):
cells_left = SolverMethods.get_cells_amount_left(method_name)
cells_right = SolverMethods.get_cells_amount_right(method_name)
grid_new = np.zeros((cells_left, grid.shape[1], grid.shape[2]))
# Check left border.
if border_left == ConditionNames.REFLECTION_CONDITION:
for i in range(cells_left - 1, -1, -1):
if direction == Directions.X:
grid_new[i][time][v] = -grid[cells_left - 1 - i][time][v]
grid_new[i][time][u] = grid[cells_left - 1 - i][time][u]
else:
grid_new[i][time][v] = grid[cells_left - 1 - i][time][v]
grid_new[i][time][u] = -grid[cells_left - 1 - i][time][u]
grid_new[i][time][p] = grid[cells_left - 1 - i][time][p]
elif border_left == ConditionNames.CYCLE_CONDITION:
for i in range(cells_left - 1, -1, -1):
grid_new[i][time] = grid[len(grid) - cells_left + i][time]
#
elif border_left == ConditionNames.ABSORBING_CONDITION:
for i in range(cells_left - 1, -1, -1):
grid_new[i][time] = np.zeros(grid.shape[2])
elif border_left == ConditionNames.APPLIED_FORCE_CONDITION:
for i in range(cells_left - 1, -1, -1):
grid_new[i][time][v] = grid[cells_left - 1 - i][time][v]
grid_new[i][time][u] = grid[cells_left - 1 - i][time][u]
grid_new[i][time][p] = 2 * force_left - grid[cells_left - 1 - i][time][p]
ext_grid = np.concatenate((grid_new, grid), axis=0)
grid_new = np.zeros((cells_right, grid.shape[1], grid.shape[2]))
# Check right border.
if border_right == ConditionNames.REFLECTION_CONDITION:
for i in range(cells_right - 1, -1, -1):
if direction == Directions.X:
grid_new[i][time][v] = -grid[len(grid) - 1 - i][time][v]
grid_new[i][time][u] = grid[len(grid) - 1 - i][time][u]
else:
grid_new[i][time][v] = grid[len(grid) - 1 - i][time][v]
grid_new[i][time][u] = -grid[len(grid) - 1 - i][time][u]
grid_new[i][time][p] = grid[len(grid) - 1 - i][time][p]
elif border_right == ConditionNames.CYCLE_CONDITION:
for i in range(cells_right - 1, -1, -1):
grid_new[i][time] = grid[i][time]
elif border_right == ConditionNames.ABSORBING_CONDITION:
for i in range(cells_right - 1, -1, -1):
grid_new[i][time] = np.zeros(grid.shape[2])
elif border_right == ConditionNames.APPLIED_FORCE_CONDITION:
for i in range(cells_right - 1, -1, -1):
grid_new[i][time][v] = grid[len(grid) - 1 - i][time][v]
grid_new[i][time][u] = grid[len(grid) - 1 - i][time][u]
grid_new[i][time][p] = 2 * force_right - grid[len(grid) - 1 - i][time][p]
ext_grid = np.concatenate((ext_grid, grid_new), axis=0)
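return ext_grid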
#!/usr/bin/env python
"""
regression_gp.py
Author: <NAME>
Modified from http://katbailey.github.io/post/gaussian-processes-for-dummies/
and https://github.com/probml/pmtk3
Description:
Performs regression using a Gaussian Process framework
Also generates the data to be used by other scripts
"""
import numpy as np
import pandas as pd
from scipy import signal
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import os
def se_covariance(x1, x2, param):
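# Squared-exponential (RBF) kernel: k(x1, x2) = exp(-||x1 - x2||^2 / (2 * param)).
# The pairwise squared distances are vectorised as ||a||^2 + ||b||^2 - 2 a.b.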
sqdist = np.sum(x1**2,1).reshape(-1,1) + np.sum(x2**2,1) - 2*np.dot(x1, x2.T)
return np.exp(-.5 * (1/param) * sqdist)
def generate_data(n, n_s):
x = (np.linspace(0, 4*np.pi, n))
from pydro.detection import FilterPyramid, DeformationCost, Score
import itertools
import numpy
from collections import namedtuple
import weakref
__all__ = [
'Offset',
'Block',
'Def',
'DeformationRule',
'Features',
'Filter',
'Loc',
'Model',
'Rule',
'Stats',
'StructuralRule',
'Symbol',
'FilteredSymbol',
'FilteredStructuralRule',
'FilteredDeformationRule',
'TreeNode',
'Leaf',
]
TreeRoot = namedtuple('TreeRoot', 'x1,x2,y1,y2,s,child,loss,model')
TreeNode = namedtuple('TreeNode', 'x,y,l,symbol,ds,s,children,rule,loss')
Leaf = namedtuple('Leaf', 'x1,x2,y1,y2,scale,x,y,l,s,ds,symbol')
class Model(object):
def __init__(self, clss, year, note, start, maxsize, minsize,
interval, sbin, thresh, type, features, stats):
self.clss = clss
self.year = year
self.note = note
self.start = start
self.maxsize = maxsize
self.minsize = minsize
self.interval = interval
self.sbin = sbin
self.thresh = thresh
self.type = type
self.features = features
self.stats = stats
def Filter(self, pyramid, loss_adjustment=None):
return FilteredModel(self, pyramid, loss_adjustment)
def GetBlocks(self):
return self.start.GetBlocks()
class FilteredModel (Model):
def __init__(self, model, pyramid, loss_adjustment):
super(FilteredModel, self).__init__(
clss=model.clss,
year=model.year,
note=model.note,
start=model.start,
maxsize=model.maxsize,
minsize=model.minsize,
interval=model.interval,
sbin=model.sbin,
thresh=model.thresh,
type=model.type,
features=model.features,
stats=model.stats,
)
self.size = self.start.GetFilteredSize(pyramid)
self.loss_adjustment = loss_adjustment
self.pyramid = pyramid
self.start = model.start.Filter(self)
def Filter(self, loss_adjustment=None):
return FilteredModel(self, self.pyramid, self.loss_adjustment)
def Parse(self, threshold):
X = numpy.array([], dtype=numpy.uint32)
Y = numpy.array([], dtype=numpy.uint32)
import unittest
import os
import glob
import sys
import numpy as np
from numpy.random import default_rng
from astropy.io import fits
from astropy import units as u
from nrm_analysis.misctools.utils import Affine2d
"""
Test Affine2d class' Identity transformation
anand 2022.04.05
run with pytest -s _moi_.py to see stdout on screen
"""
class Affine2dTestCase(unittest.TestCase):
def setUp(self):
# No test data on disk... make test data here.
mx, my = 1.0, 1.0
sx, sy = 0.0, 0.0
xo, yo = 0.0, 0.0
self.aff_id = Affine2d(mx=mx,my=my,
sx=sx,sy=sy,
xo=xo,yo=yo, name="Ideal")
# create nvecs random x,y locations
nvecs = 1000000
self.x = np.random.uniform(-1000.0, 10.0, nvecs)
import numpy as np
import eqsig
from liquepy.element.models import ShearTest
from liquepy.element import assess
def test_with_one_cycle_no_dissipation():
strs = np.array([0, -1, -2, -3, -4, -3, -2, -1, 0, 1, 2, 3, 4, 3, 2, 1, 0])
tau = np.array([0, -2, -4, -6, -8, -6, -4, -2, 0, 2, 4, 6, 8, 6, 4, 2, 0])
expected_energy = 0
assert np.isclose(expected_energy, assess.calc_diss_energy_fd(tau, strs)[-1])
def test_with_one_cycle_no_dissipation_with_offset():
strs = np.array([0, -1, -2, -3, -4, -3, -2, -1, 0, 1, 2, 3, 4, 3, 2, 1, 0]) + 4
tau = np.array([0, -2, -4, -6, -8, -6, -4, -2, 0, 2, 4, 6, 8, 6, 4, 2, 0])
expected_energy = 0
assert np.isclose(expected_energy, assess.calc_diss_energy_fd(tau, strs)[-1])
def test_with_one_cycle_circle():
angle = np.linspace(0, 2 * np.pi, 3600)
strs = 4 * np.sin(angle)
tau = 4 * np.cos(angle)
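# The stress-strain loop traces a circle of radius 4, so the dissipated energy
# per cycle equals the enclosed area, pi * 4 ** 2.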
expected_energy = 4 ** 2 * np.pi
assert np.isclose(expected_energy, assess.calc_diss_energy_fd(tau, strs)[-1])
def test_with_one_cycle_circle_with_offset():
angle = np.linspace(0, 2 * np.pi, 3600)
strs = 4 * np.sin(angle) + 4
tau = 4 * np.cos(angle) + 10
expected_energy = 4 ** 2 * np.pi
assert np.isclose(expected_energy, assess.calc_diss_energy_fd(tau, strs)[-1])
def test_with_one_cycle_triangles():
strs = np.array([0, -1, -2, -3, -4, -4, -3, -2, -1, 0, 1, 2, 3, 4, 4, 3, 2, 1, 0])
tau = np.array([0, -2, -4, -6, -8, 0, 0, 0, 0, 0, 2, 4, 6, 8, 0, 0, 0, 0, 0])
expected_energy = 8 * 4.
assert np.isclose(expected_energy, assess.calc_diss_energy_fd(tau, strs)[-1])
def test_average_of_absolute_simple():
values = np.array([4, -3])
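# |values| crosses zero at t = 4/7 of the interval, giving two triangles of area
# 0.5 * 4 * (4/7) = 8/7 and 0.5 * 3 * (3/7) = 4.5/7, i.e. 12.5/7 in total.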
expected = 12.5 / 7
av_abs = assess.average_of_absolute_via_trapz(values)
assert np.isclose(av_abs, expected), (av_abs, expected)
def test_average_of_absolute_matching_neg():
values = np.array([3, -3, 3])
expected = 1.5
av_abs = assess.average_of_absolute_via_trapz(values)
assert np.isclose(av_abs[0], expected), (av_abs[0], expected)
assert np.isclose(av_abs[1], expected), (av_abs[1], expected)
def test_determine_cum_stored_energy_series_simple():
gamma = np.array([0, 4, 0, -3, 0])
tau = np.array([0, 4, 0, -3, 0])
two_times_triangle_1 = 2 * (4 * 4 / 2)
two_times_triangle_2 = 2 * (3 * 3 / 2)
expected_energy = two_times_triangle_1 + two_times_triangle_2
et = ShearTest(tau, gamma, 1)
energy = assess.calc_case_et(et)
assert energy[-1] == expected_energy, (energy[-1], expected_energy)
def test_small_cycle_behaviour_increases_case():
gamma_1 = np.array([0, 4, -2, 0])
tau_1 = np.array([0, 4, -4, 0])
et_1 = ShearTest(tau_1, gamma_1, 1)
energy_1 = assess.calc_case_et(et_1)
gamma_2 = np.array([0, 4, 3, 4, -2, 0])
tau_2 = np.array([0, 4, 1, 1, -4, 0])
et_2 = ShearTest(tau_2, gamma_2, 1)
energy_2 = assess.calc_case_et(et_2)
assert energy_2[-1] > energy_1[-1]
def skip_test_strain_bulge_behaviour_increases_case():
gamma_1 = np.array([0, 4, -2, 0])
tau_1 = np.array([0, 4, -4, 0])
et_1 = ShearTest(tau_1, gamma_1, 1)
energy_1 = assess.calc_case_et(et_1)
gamma_2 = np.array([0, 4, 4.1, -2, 0])
tau_2 = np.array([0, 4, 1, -4, 0])
et_2 = ShearTest(tau_2, gamma_2, 1)
energy_2 = assess.calc_case_et(et_2)
assert energy_2[-1] > energy_1[-1]
def test_determine_cum_stored_energy_series_simple_up_down():
"""
/\
:return:
"""
gamma = np.array([0., 1., 0.5])
tau = np.array([0., 1., 0])
expected_delta_e = 0.75 # two triangles (1x1x0.5 + 1x0.5x0.5)
et = ShearTest(tau, gamma)
energy = assess.calc_case_et(et)
assert energy[-1] == expected_delta_e, energy
def test_determine_cum_stored_energy_series_simple_up_down_neg():
gamma = np.array([0., 1., -1])
tau = np.array([0., 1., -1])
expected_delta_e = 1.5
et = ShearTest(tau, gamma)
energy = assess.calc_case_et(et)
assert energy[-1] == expected_delta_e, energy
def test_determine_cum_stored_energy_series_simple_close_loop():
gamma = np.array([1., -1, 1])
tau = np.array([1., -1, 1])
expected_delta_e = 2
et = ShearTest(tau, gamma)
energy = assess.calc_case_et(et)
assert energy[-1] == expected_delta_e, energy
def test_determine_cum_stored_energy_series_simple_4points():
gamma = np.array([0, 1, -1, 2])
tau = np.array([0, 1, -1, 1])
step_1 = (0 + 1) / 2 * (1 - 0)
step_2 = (0 + 2) / 2 * (1 - 0)
expected_delta_e = step_1 * 4 + step_2
et = ShearTest(tau, gamma)
energy = assess.calc_case_et(et)
assert energy[-1] == expected_delta_e, (energy, expected_delta_e)
def test_determine_cum_stored_energy_series_simple_trapz_zero():
gamma = np.array([0, 2, 1])
tau = np.array([0, 2, 1])
step_1 = (0 + 2) / 2 * (2 - 0)
step_2 = (2 + 1) / 2 * abs(2 - 1)
expected_delta_e = step_1 + step_2
et = ShearTest(tau, gamma)
energy = assess.calc_case_et(et)
assert energy[-1] == expected_delta_e, (energy, expected_delta_e)
def test_determine_cum_stored_energy_series_simple_trapz():
gamma = np.array([1, 3, 2])
tau = np.array([1, 2, 0])
step_1 = (0 + 1) / 2 * (2 - 0)
step_2 = (2 + 1) / 2 * abs(2 - 0)
expected_delta_e = step_1 + step_2
et = ShearTest(tau, gamma)
energy = assess.calc_case_et(et)
assert energy[-1] == expected_delta_e, (energy, expected_delta_e)
def test_determine_cum_stored_energy_series_simple_5points():
gamma = np.array([0, 2, 1, 3, 2])
from argparse import Namespace
import GPy
import heapq
import numpy as np
from rdkit import Chem
from sklearn.ensemble import RandomForestRegressor
import torch.nn as nn
from typing import Any, Callable, List, Tuple
from .predict import predict
from chemprop.data import MoleculeDataset, StandardScaler
from chemprop.features import morgan_binary_features_generator as morgan
class UncertaintyEstimator:
"""
An UncertaintyEstimator calculates uncertainty when passed a model.
Certain UncertaintyEstimators also augment the model and alter prediction
values. Note that many UncertaintyEstimators compute unscaled uncertainty
values. These are only meaningful relative to one another.
"""
def __init__(self,
train_data: MoleculeDataset,
val_data: MoleculeDataset,
test_data: MoleculeDataset,
scaler: StandardScaler,
args: Namespace):
"""
Constructs an UncertaintyEstimator.
:param train_data: The data a model was trained on.
:param val_data: The validation/supplementary data for a model.
:param test_data: The data to test the model with.
:param scaler: A scaler the model uses to transform input data.
:param args: The command line arguments.
"""
self.train_data = train_data
self.val_data = val_data
self.test_data = test_data
self.scaler = scaler
self.args = args
def process_model(self, model: nn.Module):
"""Perform initialization using model and prior data.
:param model: The model to learn the uncertainty of.
"""
pass
def compute_uncertainty(self,
val_predictions: np.ndarray,
test_predictions: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Compute uncertainty on self.val_data and self.test_data predictions.
:param val_predictions: The predictions made on self.val_data.
:param test_predictions: The predictions made on self.test_data.
:return: Validation set predictions, validation set uncertainty,
test set predictions, and test set uncertainty.
"""
pass
def _scale_uncertainty(self, uncertainty: float) -> float:
"""
Rescale uncertainty estimates to account for scaled input.
:param uncertainty: An unscaled uncertainty estimate.
:return: A scaled uncertainty estimate.
"""
return self.scaler.stds * uncertainty
class EnsembleEstimator(UncertaintyEstimator):
"""
An EnsembleEstimator trains a collection of models.
Each model is exposed to all training data.
On any input, a single prediction is calculated by taking the mean of
model outputs. Reported uncertainty is the variance of outputs.
"""
def __init__(self,
train_data: MoleculeDataset,
val_data: MoleculeDataset,
test_data: MoleculeDataset,
scaler: StandardScaler,
args: Namespace):
super().__init__(train_data, val_data, test_data, scaler, args)
self.all_val_preds = None
self.all_test_preds = None
def process_model(self, model: nn.Module):
val_preds = predict(
model=model,
data=self.val_data,
batch_size=self.args.batch_size,
scaler=self.scaler,
)
test_preds = predict(
model=model,
data=self.test_data,
batch_size=self.args.batch_size,
scaler=self.scaler,
)
reshaped_val_preds = np.array(val_preds).reshape(
(len(self.val_data.smiles()), self.args.num_tasks, 1))
if self.all_val_preds is not None:
self.all_val_preds = np.concatenate(
(self.all_val_preds, reshaped_val_preds), axis=2)
else:
self.all_val_preds = reshaped_val_preds
reshaped_test_preds = np.array(test_preds).reshape(
(len(self.test_data.smiles()), self.args.num_tasks, 1))
if self.all_test_preds is not None:
self.all_test_preds = np.concatenate(
(self.all_test_preds, reshaped_test_preds), axis=2)
else:
self.all_test_preds = reshaped_test_preds
def compute_uncertainty(self,
val_predictions: np.ndarray,
test_predictions: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
val_uncertainty = np.sqrt(np.var(self.all_val_preds, axis=2))
test_uncertainty = np.sqrt(np.var(self.all_test_preds, axis=2))
return (val_predictions,
val_uncertainty,
test_predictions,
test_uncertainty)
class BootstrapEstimator(EnsembleEstimator):
"""
A BootstrapEstimator trains a collection of models.
Each model is exposed to only a subset of training data.
On any input, a single prediction is calculated by taking the mean of
model outputs. Reported uncertainty is the variance of outputs.
"""
def __init__(self,
train_data: MoleculeDataset,
val_data: MoleculeDataset,
test_data: MoleculeDataset,
scaler: StandardScaler,
args: Namespace):
super().__init__(train_data, val_data, test_data, scaler, args)
class SnapshotEstimator(EnsembleEstimator):
"""
A SnapshotEstimator trains a collection of models.
Each model is produced by storing a single NN's
weights at different epochs during training.
On any input, a single prediction is calculated by taking the mean of
model outputs. Reported uncertainty is the variance of outputs.
"""
def __init__(self,
train_data: MoleculeDataset,
val_data: MoleculeDataset,
test_data: MoleculeDataset,
scaler: StandardScaler,
args: Namespace):
super().__init__(train_data, val_data, test_data, scaler, args)
class DropoutEstimator(EnsembleEstimator):
"""
A DropoutEstimator trains a collection of models.
The prediction of each 'model' is calculated by dropping out a random
subset of nodes from a single NN.
On any input, a single prediction is calculated by taking the mean of
model outputs. Reported uncertainty is the variance of outputs.
"""
def __init__(self,
train_data: MoleculeDataset,
val_data: MoleculeDataset,
test_data: MoleculeDataset,
scaler: StandardScaler,
args: Namespace):
super().__init__(train_data, val_data, test_data, scaler, args)
class MVEEstimator(UncertaintyEstimator):
"""
An MVEEstimator alters NN structure to produce twice as many outputs.
Half correspond to predicted labels and half correspond to uncertainties.
"""
def __init__(self,
train_data: MoleculeDataset,
val_data: MoleculeDataset,
test_data: MoleculeDataset,
scaler: StandardScaler,
args: Namespace):
super().__init__(train_data, val_data, test_data, scaler, args)
self.sum_val_uncertainty = np.zeros(
(len(val_data.smiles()), args.num_tasks))
self.sum_test_uncertainty = np.zeros(
(len(test_data.smiles()), args.num_tasks))
def process_model(self, model: nn.Module):
val_preds, val_uncertainty = predict(
model=model,
data=self.val_data,
batch_size=self.args.batch_size,
scaler=self.scaler,
uncertainty=True
)
if len(val_preds) != 0:
self.sum_val_uncertainty += np.array(val_uncertainty).clip(min=0)
test_preds, test_uncertainty = predict(
model=model,
data=self.test_data,
batch_size=self.args.batch_size,
scaler=self.scaler,
uncertainty=True
)
if len(test_preds) != 0:
self.sum_test_uncertainty += np.array(test_uncertainty).clip(min=0)
def compute_uncertainty(self,
val_predictions: np.ndarray,
test_predictions: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
return (val_predictions,
np.sqrt(self.sum_val_uncertainty / self.args.ensemble_size),
test_predictions,
np.sqrt(self.sum_test_uncertainty / self.args.ensemble_size))
class ExposureEstimator(UncertaintyEstimator):
"""
An ExposureEstimator drops the output layer
of the provided model after training.
The "exposed" final hidden-layer is used to calculate uncertainty.
"""
def __init__(self,
train_data: MoleculeDataset,
val_data: MoleculeDataset,
test_data: MoleculeDataset,
scaler: StandardScaler,
args: Namespace):
super().__init__(train_data, val_data, test_data, scaler, args)
self.sum_last_hidden_train = np.zeros(
(len(self.train_data.smiles()), self.args.last_hidden_size))
self.sum_last_hidden_val = np.zeros(
(len(self.val_data.smiles()), self.args.last_hidden_size))
self.sum_last_hidden_test = np.zeros(
(len(self.test_data.smiles()), self.args.last_hidden_size))
def process_model(self, model: nn.Module):
model.eval()
model.use_last_hidden = False
last_hidden_train = predict(
model=model,
data=self.train_data,
batch_size=self.args.batch_size,
scaler=None
)
self.sum_last_hidden_train += np.array(last_hidden_train)
last_hidden_val = predict(
model=model,
data=self.val_data,
batch_size=self.args.batch_size,
scaler=None
)
self.sum_last_hidden_val += np.array(last_hidden_val)
last_hidden_test = predict(
model=model,
data=self.test_data,
batch_size=self.args.batch_size,
scaler=None
)
self.sum_last_hidden_test += np.array(last_hidden_test)
def _compute_hidden_vals(self):
ensemble_size = self.args.ensemble_size
avg_last_hidden_train = self.sum_last_hidden_train / ensemble_size
avg_last_hidden_val = self.sum_last_hidden_val / ensemble_size
avg_last_hidden_test = self.sum_last_hidden_test / ensemble_size
return avg_last_hidden_train, avg_last_hidden_val, avg_last_hidden_test
class GaussianProcessEstimator(ExposureEstimator):
"""
A GaussianProcessEstimator trains a Gaussian process to
operate on data transformed by the provided model.
Uncertainty and predictions are calculated using
the output of the Gaussian process.
"""
def compute_uncertainty(self,
val_predictions: np.ndarray,
test_predictions: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
(_,
avg_last_hidden_val,
avg_last_hidden_test) = self._compute_hidden_vals()
val_predictions = np.ndarray(
shape=(len(self.val_data.smiles()), self.args.num_tasks))
val_uncertainty = np.ndarray(
shape=(len(self.val_data.smiles()), self.args.num_tasks))
test_predictions = np.ndarray(
shape=(len(self.test_data.smiles()), self.args.num_tasks))
test_uncertainty = np.ndarray(
shape=(len(self.test_data.smiles()), self.args.num_tasks))
transformed_val = self.scaler.transform(
np.array(self.val_data.targets()))
for task in range(self.args.num_tasks):
kernel = GPy.kern.Linear(input_dim=self.args.last_hidden_size)
gaussian = GPy.models.SparseGPRegression(
avg_last_hidden_val,
transformed_val[:, task:task + 1], kernel)
gaussian.optimize()
avg_val_preds, avg_val_var = gaussian.predict(
avg_last_hidden_val)
val_predictions[:, task:task + 1] = avg_val_preds
val_uncertainty[:, task:task + 1] = np.sqrt(avg_val_var)
avg_test_preds, avg_test_var = gaussian.predict(
avg_last_hidden_test)
test_predictions[:, task:task + 1] = avg_test_preds
test_uncertainty[:, task:task + 1] = np.sqrt(avg_test_var)
val_predictions = self.scaler.inverse_transform(val_predictions)
test_predictions = self.scaler.inverse_transform(test_predictions)
return (val_predictions, self._scale_uncertainty(val_uncertainty),
test_predictions, self._scale_uncertainty(test_uncertainty))
class FPGaussianProcessEstimator(UncertaintyEstimator):
"""
An FPGaussianProcessEstimator trains a Gaussian process on the
morgan fingerprints of provided training data.
Uncertainty and predictions are calculated using
the output of the Gaussian process.
"""
def compute_uncertainty(self,
val_predictions: np.ndarray,
test_predictions: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
train_smiles = self.train_data.smiles()
val_smiles = self.val_data.smiles()
test_smiles = self.test_data.smiles()
# Train targets are already scaled.
scaled_train_targets = np.array(self.train_data.targets())
train_fps = np.array([morgan(s) for s in train_smiles])
val_fps = np.array([morgan(s) for s in val_smiles])
test_fps = np.array([morgan(s) for s in test_smiles])
val_predictions = np.ndarray(
shape=(len(self.val_data.smiles()), self.args.num_tasks))
val_uncertainty = np.ndarray(
shape=(len(self.val_data.smiles()), self.args.num_tasks))
test_predictions = np.ndarray(
shape=(len(self.test_data.smiles()), self.args.num_tasks))
test_uncertainty = np.ndarray(
shape=(len(self.test_data.smiles()), self.args.num_tasks))
for task in range(self.args.num_tasks):
kernel = GPy.kern.Linear(input_dim=train_fps.shape[1])
gaussian = GPy.models.SparseGPRegression(
train_fps,
scaled_train_targets[:, task:task + 1], kernel)
gaussian.optimize()
val_preds, val_var = gaussian.predict(
val_fps)
val_predictions[:, task:task + 1] = val_preds
val_uncertainty[:, task:task + 1] = np.sqrt(val_var)
test_preds, test_var = gaussian.predict(
test_fps)
test_predictions[:, task:task + 1] = test_preds
test_uncertainty[:, task:task + 1] = np.sqrt(test_var)
val_predictions = self.scaler.inverse_transform(val_predictions)
test_predictions = self.scaler.inverse_transform(test_predictions)
return (val_predictions, self._scale_uncertainty(val_uncertainty),
test_predictions, self._scale_uncertainty(test_uncertainty))
class RandomForestEstimator(ExposureEstimator):
def compute_uncertainty(self,
val_predictions: np.ndarray,
test_predictions: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
A RandomForestEstimator trains a random forest to
operate on data transformed by the provided model.
Predictions are calculated using the output of the random forest.
Reported uncertainty is the variance of trees in the forest.
"""
(_,
avg_last_hidden_val,
avg_last_hidden_test) = self._compute_hidden_vals()
val_predictions = np.ndarray(
shape=(len(self.val_data.smiles()), self.args.num_tasks))
val_uncertainty = np.ndarray(
shape=(len(self.val_data.smiles()), self.args.num_tasks))
test_predictions = np.ndarray(
shape=(len(self.test_data.smiles()), self.args.num_tasks))
test_uncertainty = np.ndarray(
shape=(len(self.test_data.smiles()), self.args.num_tasks))
transformed_val = self.scaler.transform(
np.array(self.val_data.targets()))
n_trees = 128
for task in range(self.args.num_tasks):
forest = RandomForestRegressor(n_estimators=n_trees)
forest.fit(avg_last_hidden_val, transformed_val[:, task])
avg_val_preds = forest.predict(avg_last_hidden_val)
val_predictions[:, task] = avg_val_preds
individual_val_predictions = np.array([estimator.predict(
avg_last_hidden_val) for estimator in forest.estimators_])
val_uncertainty[:, task] = np.std(individual_val_predictions,
axis=0)
avg_test_preds = forest.predict(avg_last_hidden_test)
test_predictions[:, task] = avg_test_preds
individual_test_predictions = np.array([estimator.predict(
avg_last_hidden_test) for estimator in forest.estimators_])
test_uncertainty[:, task] = np.std(individual_test_predictions,
axis=0)
val_predictions = self.scaler.inverse_transform(val_predictions)
test_predictions = self.scaler.inverse_transform(test_predictions)
return (val_predictions, self._scale_uncertainty(val_uncertainty),
test_predictions, self._scale_uncertainty(test_uncertainty))
class FPRandomForestEstimator(UncertaintyEstimator):
"""
An FPRandomForestEstimator trains a random forest on the
morgan fingerprints of provided training data.
Predictions are calculated using the output of the random forest.
Reported uncertainty is the variance of trees in the forest.
"""
def compute_uncertainty(self,
val_predictions: np.ndarray,
test_predictions: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
train_smiles = self.train_data.smiles()
val_smiles = self.val_data.smiles()
test_smiles = self.test_data.smiles()
# Train targets are already scaled.
scaled_train_targets = np.array(self.train_data.targets())
train_fps = np.array([morgan(s) for s in train_smiles])
val_fps = np.array([morgan(s) for s in val_smiles])
test_fps = np.array([morgan(s) for s in test_smiles])
val_predictions = np.ndarray(
shape=(len(self.val_data.smiles()), self.args.num_tasks))
val_uncertainty = np.ndarray(
shape=(len(self.val_data.smiles()), self.args.num_tasks))
test_predictions = np.ndarray(
shape=(len(self.test_data.smiles()), self.args.num_tasks))
test_uncertainty = np.ndarray(
shape=(len(self.test_data.smiles()), self.args.num_tasks))
n_trees = 128
for task in range(self.args.num_tasks):
forest = RandomForestRegressor(n_estimators=n_trees)
forest.fit(train_fps, scaled_train_targets[:, task])
avg_val_preds = forest.predict(val_fps)
val_predictions[:, task] = avg_val_preds
individual_val_predictions = np.array([estimator.predict(
val_fps) for estimator in forest.estimators_])
val_uncertainty[:, task] = np.std(individual_val_predictions,
axis=0)
avg_test_preds = forest.predict(test_fps)
test_predictions[:, task] = avg_test_preds
individual_test_predictions = np.array([estimator.predict(
test_fps) for estimator in forest.estimators_])
test_uncertainty[:, task] = np.std(individual_test_predictions,
axis=0)
val_predictions = self.scaler.inverse_transform(val_predictions)
test_predictions = self.scaler.inverse_transform(test_predictions)
return (val_predictions, self._scale_uncertainty(val_uncertainty),
test_predictions, self._scale_uncertainty(test_uncertainty))
class LatentSpaceEstimator(ExposureEstimator):
"""
A LatentSpaceEstimator uses the latent space distance between
a molecule and its kNN in the training set to calculate uncertainty.
"""
def compute_uncertainty(self,
val_predictions: np.ndarray,
test_predictions: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
(avg_last_hidden_train,
avg_last_hidden_val,
avg_last_hidden_test) = self._compute_hidden_vals()
val_uncertainty = np.zeros((len(self.val_data.smiles()),
self.args.num_tasks))
test_uncertainty = np.zeros((len(self.test_data.smiles()),
self.args.num_tasks))
for val_input in range(len(avg_last_hidden_val)):
distances = np.zeros(len(avg_last_hidden_train))
for train_input in range(len(avg_last_hidden_train)):
difference = avg_last_hidden_val[
val_input] - avg_last_hidden_train[train_input]
distances[train_input] = np.sqrt(
np.sum(difference * difference))
val_uncertainty[val_input, :] = kNN(distances, 8)
for test_input in range(len(avg_last_hidden_test)):
distances = np.zeros(len(avg_last_hidden_train))
for train_input in range(len(avg_last_hidden_train)):
difference = avg_last_hidden_test[
test_input] - avg_last_hidden_train[train_input]
distances[train_input] = np.sqrt(
np.sum(difference * difference))
test_uncertainty[test_input, :] = kNN(distances, 8)
return (val_predictions,
val_uncertainty,
test_predictions,
test_uncertainty)
class TanimotoEstimator(UncertaintyEstimator):
"""
A TanimotoEstimator uses the tanimoto distance between
a molecule and its kNN in the training set to calculate uncertainty.
"""
def compute_uncertainty(self,
val_predictions: np.ndarray,
test_predictions: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
train_smiles = self.train_data.smiles()
val_smiles = self.val_data.smiles()
test_smiles = self.test_data.smiles()
val_uncertainty = np.ndarray(
shape=(len(val_smiles), self.args.num_tasks))
test_uncertainty = np.ndarray(
shape=(len(test_smiles), self.args.num_tasks))
train_smiles_sfp = [morgan(s) for s in train_smiles]
for i in range(len(val_smiles)):
val_uncertainty[i, :] = np.ones((self.args.num_tasks)) * tanimoto(
val_smiles[i], train_smiles_sfp, lambda x: kNN(x, 8))
for i in range(len(test_smiles)):
test_uncertainty[i, :] = np.ones((self.args.num_tasks)) * tanimoto(
test_smiles[i], train_smiles_sfp, lambda x: kNN(x, 8))
return (val_predictions,
val_uncertainty,
test_predictions,
test_uncertainty)
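# NOTE: the `kNN` helper used by LatentSpaceEstimator and TanimotoEstimator is
# defined outside this excerpt. The sketch below is an assumption, not the
# original implementation: it averages the k smallest distances, using the
# heapq import above.
def kNN(distances, k: int) -> float:
    return float(np.mean(heapq.nsmallest(k, distances)))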
def tanimoto(smile: str, train_smiles_sfp: np.ndarray, operation: Callable) \
-> Any:
"""
Computes the tanimoto distances between a
molecule and elements of the training set.
:param smile: The SMILES string of the molecule of interest.
:param train_smiles_sfp: The fingerprints of training set elements.
:param operation: Some function used to process computed distances.
"""
smiles = Chem.MolToSmiles(Chem.MolFromSmiles(smile))
fp = morgan(smiles)
tanimoto_distance = []
for sfp in train_smiles_sfp:
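# Tanimoto similarity on binary fingerprints: |A & B| / (|A| + |B| - |A & B|).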
tsim = np.dot(fp, sfp) / (fp.sum() +
                                  sfp.sum() - np.dot(fp, sfp))
"""
Compute lambdas for THC according to
PRX QUANTUM 2, 030305 (2021) Section II. D.
"""
import numpy as np
from chemftr.molecule import pyscf_to_cas
def compute_lambda(pyscf_mf, etaPp: np.ndarray, MPQ: np.ndarray, use_eri_thc_for_t=False):
"""
Compute lambda thc
Args:
pyscf_mf - PySCF mean field object
etaPp - leaf tensor for THC that is dim(nthc x norb). The nthc and norb is
inferred from this quantity.
MPQ - central tensor for THC factorization. dim(nthc x nthc)
Returns:
"""
nthc = etaPp.shape[0]
# grab tensors from pyscf_mf object
h1, eri_full, _, _, _ = pyscf_to_cas(pyscf_mf)
# computing Least-squares THC residual
CprP = np.einsum("Pp,Pr->prP", etaPp, etaPp) # this is einsum('mp,mq->pqm', etaPp, etaPp)
BprQ = np.tensordot(CprP, MPQ, axes=([2], [0]))
Iapprox = np.tensordot(CprP, np.transpose(BprQ), axes=([2], [0]))
deri = eri_full - Iapprox
res = 0.5 * np.sum((deri) ** 2)
# NOTE: remove in future once we resolve why it was being used in the first place.
# NOTE: see T construction for details.
eri_thc = np.einsum("Pp,Pr,Qq,Qs,PQ->prqs", etaPp, etaPp, etaPp, etaPp, MPQ, optimize=True)
# projecting into the THC basis requires each THC factor mu to be normalized.
# we roll the normalization constant into the central tensor zeta
SPQ = etaPp.dot(etaPp.T) # (nthc x norb) x (norb x nthc) -> (nthc x nthc) metric
cP = np.diag(np.diag(SPQ)) # grab diagonal elements. equivalent to np.diag(np.diagonal(SPQ))
# no sqrts because we have two normalized THC vectors (indexed by mu and nu) on each side.
MPQ_normalized = cP.dot(MPQ).dot(cP) # get normalized zeta in Eq. 11 & 12
lambda_z = np.sum(np.abs(MPQ_normalized))
import os
import json
import ecco
from IPython import display as d
from ecco import util, lm_plots
import random
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.nn import functional as F
from sklearn import decomposition
from typing import Optional, List
class OutputSeq:
def __init__(self,
token_ids=None,
n_input_tokens=None,
tokenizer=None,
output_text=None,
tokens=None,
hidden_states=None,
attribution=None,
activations=None,
activations_type=None,
attention=None,
model_outputs=None,
lm_head=None,
device='cpu'):
self.token_ids = token_ids
self.tokenizer = tokenizer
self.n_input_tokens = n_input_tokens
self.output_text = output_text
self.tokens = tokens
self.hidden_states = hidden_states
self.attribution = attribution
self.activations = activations
self.activations_type = activations_type
self.model_outputs = model_outputs
self.attention_values = attention
self.lm_head = lm_head
self.device = device
self._path = os.path.dirname(ecco.__file__)
def __str__(self):
return "<LMOutput '{}' # of lm outputs: {}>".format(self.output_text, len(self.hidden_states))
def to(self, tensor: torch.Tensor):
if self.device == 'cuda':
return tensor.to('cuda')
return tensor
def explorable(self, printJson: Optional[bool] = False):
tokens = []
for idx, token in enumerate(self.tokens):
type = "input" if idx < self.n_input_tokens else 'output'
tokens.append({'token': token,
'token_id': int(self.token_ids[idx]),
'type': type
})
data = {
'tokens': tokens
}
d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
d.display(d.HTML(filename=os.path.join(self._path, "html", "basic.html")))
viz_id = 'viz_{}'.format(round(random.random() * 1000000))
js = """
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
ecco.renderOutputSequence(viz_id, {})
}}, function (err) {{
console.log(err);
}})""".format(data)
d.display(d.Javascript(js))
if printJson:
print(data)
def __call__(self, position=None, **kwargs):
if position is not None:
self.position(position, **kwargs)
else:
self.saliency(**kwargs)
def position(self, position, attr_method='grad_x_input'):
if (position < self.n_input_tokens) or (position > len(self.tokens) - 1):
raise ValueError("'position' should indicate a position of a generated token. "
"Accepted values for this sequence are between {} and {}."
.format(self.n_input_tokens, len(self.tokens) - 1))
importance_id = position - self.n_input_tokens
tokens = []
attribution = self.attribution[attr_method]
for idx, token in enumerate(self.tokens):
type = "input" if idx < self.n_input_tokens else 'output'
if idx < len(attribution[importance_id]):
imp = attribution[importance_id][idx]
else:
imp = -1
tokens.append({'token': token,
'token_id': int(self.token_ids[idx]),
'type': type,
'value': str(imp) # because json complains of floats
})
data = {
'tokens': tokens
}
d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
d.display(d.HTML(filename=os.path.join(self._path, "html", "basic.html")))
viz_id = 'viz_{}'.format(round(random.random() * 1000000))
js = """
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
ecco.renderSeqHighlightPosition(viz_id, {}, {})
}}, function (err) {{
console.log(err);
}})""".format(position, data)
d.display(d.Javascript(js))
def saliency(self, attr_method: Optional[str] = 'grad_x_input', style="minimal", **kwargs):
"""
Explorable showing saliency of each token generation step.
Hovering-over or tapping an output token imposes a saliency map on other tokens
showing their importance as features to that prediction.
"""
position = self.n_input_tokens
importance_id = position - self.n_input_tokens
tokens = []
attribution = self.attribution[attr_method]
for idx, token in enumerate(self.tokens):
type = "input" if idx < self.n_input_tokens else 'output'
if idx < len(attribution[importance_id]):
imp = attribution[importance_id][idx]
else:
imp = 0
tokens.append({'token': token,
'token_id': int(self.token_ids[idx]),
'type': type,
'value': str(imp), # because json complains of floats
'position': idx
})
data = {
'tokens': tokens,
'attributions': [att.tolist() for att in attribution]
}
d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
d.display(d.HTML(filename=os.path.join(self._path, "html", "basic.html")))
# viz_id = 'viz_{}'.format(round(random.random() * 1000000))
if style == "minimal":
js = f"""
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
// ecco.interactiveTokens(viz_id, {{}})
window.ecco[viz_id] = new ecco.MinimalHighlighter({{
parentDiv: viz_id,
data: {data},
preset: 'viridis'
}})
window.ecco[viz_id].init();
window.ecco[viz_id].selectFirstToken();
}}, function (err) {{
console.log(err);
}})"""
elif style == "detailed":
js = f"""
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
window.ecco[viz_id] = ecco.interactiveTokens(viz_id, {data})
}}, function (err) {{
console.log(err);
}})"""
d.display(d.Javascript(js))
if 'printJson' in kwargs and kwargs['printJson']:
print(data)
return data
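# Hedged usage sketch (assumes ecco's `from_pretrained` / `LM.generate` entry
# points from the surrounding package):
#     lm = ecco.from_pretrained('distilgpt2')
#     output = lm.generate("The countries of the EU include", generate=10)
#     output.saliency()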
def _repr_html_(self, **kwargs):
# if util.type_of_script() == "jupyter":
self.explorable(**kwargs)
return '<OutputSeq>'
# else:
# return "<OutputSeq Generated tokens: {}. \nFull sentence:'{}' \n# of lm outputus: {}\nTokens:\n{}>" \
# .format(self.tokens[self.n_input_tokens:],
# self.output_text,
# len(self.outputs),
# ', '.join(["{}:'{}'".format(idx, t) for idx, t in enumerate(self.tokens)]))
def plot_feature_importance_barplots(self):
"""
Barplot showing the importance of each input token. Prints one barplot
for each generated token.
TODO: This should be LMOutput I think
:return:
"""
printable_tokens = [repr(token) for token in self.tokens]
for i in self.importance:
importance = i.numpy()
lm_plots.token_barplot(printable_tokens, importance)
# print(i.numpy())
plt.show()
def layer_predictions(self, position: int = 0, topk: Optional[int] = 10, layer: Optional[int] = None, **kwargs):
"""
Visualization plotting the topk predicted tokens after each layer (using its hidden state).
:param output: OutputSeq object generated by LM.generate()
:param position: The index of the output token to trace
:param topk: Number of tokens to show for each layer
:param layer: None shows all layers. Can also pass an int with the layer id to show only that layer
"""
watch = self.to(torch.tensor([self.token_ids[self.n_input_tokens]]))
# There is one lm output per generated token. To get the index
output_index = position - self.n_input_tokens
if layer is not None:
hidden_states = self.hidden_states[layer + 1].unsqueeze(0)
else:
hidden_states = self.hidden_states[1:] # Ignore the first element (embedding)
k = topk
top_tokens = []
probs = []
data = []
print('Predictions for position {}'.format(position))
for layer_no, h in enumerate(hidden_states):
# print(h.shape)
hidden_state = h[position - 1]
# Use lm_head to project the layer's hidden state to output vocabulary
logits = self.lm_head(hidden_state)
softmax = F.softmax(logits, dim=-1)
sorted_softmax = self.to(torch.argsort(softmax))
# Not currently used. If we're "watching" a specific token, this gets its ranking
# idx = sorted_softmax.shape[0] - torch.nonzero((sorted_softmax == watch)).flatten()
layer_top_tokens = [self.tokenizer.decode([t]) for t in sorted_softmax[-k:]][::-1]
top_tokens.append(layer_top_tokens)
layer_probs = softmax[sorted_softmax[-k:]].cpu().detach().numpy()[::-1]
probs.append(layer_probs.tolist())
# Package in output format
layer_data = []
for idx, (token, prob) in enumerate(zip(layer_top_tokens, layer_probs)):
# print(layer_no, idx, token)
layer_num = layer if layer is not None else layer_no
layer_data.append({'token': token,
'prob': str(prob),
'ranking': idx + 1,
'layer': layer_num
})
data.append(layer_data)
d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
d.display(d.HTML(filename=os.path.join(self._path, "html", "basic.html")))
viz_id = 'viz_{}'.format(round(random.random() * 1000000))
js = f"""
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
let pred = new ecco.LayerPredictions({{
parentDiv: viz_id,
data:{json.dumps(data)}
}})
pred.init()
}}, function (err) {{
console.log(viz_id, err);
}})"""
d.display(d.Javascript(js))
if 'printJson' in kwargs and kwargs['printJson']:
print(data)
return data
def rankings(self, **kwargs):
"""
Plots the rankings (across layers) of the tokens the model selected.
Each column is a position in the sequence. Each row is a layer.
"""
hidden_states = self.hidden_states
n_layers = len(hidden_states)
position = hidden_states[0].shape[0] - self.n_input_tokens + 1
# print('position', position)
predicted_tokens = np.empty((n_layers - 1, position), dtype='U25')
rankings = np.zeros((n_layers - 1, position), dtype=np.int32)
token_found_mask = np.ones((n_layers - 1, position))
# loop through layer levels
for i, level in enumerate(hidden_states[1:]):
# Loop through generated/output positions
for j, hidden_state in enumerate(level[self.n_input_tokens - 1:]):
# print('hidden state layer', i, 'position', self.n_input_tokens-1+j)
# Project hidden state to vocabulary
# (after debugging pain: ensure input is on GPU, if appropriate)
logits = self.lm_head(hidden_state)
# logits = self.lm_head(torch.tensor(hidden_state))
# Sort by score (ascending)
sorted = torch.argsort(logits)
# What token was sampled in this position?
token_id = torch.tensor(self.token_ids[self.n_input_tokens + j])
# print('token_id', token_id)
# What's the index of the sampled token in the sorted list?
r = torch.nonzero((sorted == token_id)).flatten()
# subtract to get ranking (where 1 is the top scoring, because sorting was in ascending order)
ranking = sorted.shape[0] - r
# print('ranking', ranking)
# token_id = torch.argmax(sm)
token = self.tokenizer.decode([token_id])
predicted_tokens[i, j] = token
rankings[i, j] = int(ranking)
# print('layer', i, 'position', j, 'top1', token_id, 'actual label', output['token_ids'][j]+1)
if token_id == self.token_ids[j + 1]:
token_found_mask[i, j] = 0
input_tokens = [repr(t) for t in self.tokens[self.n_input_tokens - 1:-1]]
output_tokens = [repr(t) for t in self.tokens[self.n_input_tokens:]]
# print('in out', input_tokens, output_tokens)
lm_plots.plot_inner_token_rankings(input_tokens,
output_tokens,
rankings,
**kwargs)
if 'printJson' in kwargs and kwargs['printJson']:
data = {'input_tokens': input_tokens,
'output_tokens': output_tokens,
'rankings': rankings,
'predicted_tokens': predicted_tokens}
print(data)
return data
def rankings_watch(self, watch: List[int] = None, position: int = -1, **kwargs):
"""
Plots the rankings of the tokens whose ids are supplied in the watch list.
Only considers one position.
"""
if position != -1:
position = position - 1 # e.g. position 5 corresponds to activation 4
hidden_states = self.hidden_states
n_layers = len(hidden_states)
n_tokens_to_watch = len(watch)
# predicted_tokens = np.empty((n_layers - 1, n_tokens_to_watch), dtype='U25')
rankings = np.zeros((n_layers - 1, n_tokens_to_watch), dtype=np.int32)
# loop through layer levels
for i, level in enumerate(hidden_states[1:]): # Skip the embedding layer
# Loop through generated/output positions
for j, token_id in enumerate(watch):
hidden_state = level[position]
# Project hidden state to vocabulary
# (after debugging pain: ensure input is on GPU, if appropriate)
logits = self.lm_head(hidden_state)
# logits = lmhead(torch.tensor(hidden_state))
# Sort by score (ascending)
sorted = torch.argsort(logits)
# What token was sampled in this position?
token_id = torch.tensor(token_id)
# print('token_id', token_id)
# What's the index of the sampled token in the sorted list?
r = torch.nonzero((sorted == token_id)).flatten()
# subtract to get ranking (where 1 is the top scoring, because sorting was in ascending order)
ranking = sorted.shape[0] - r
# print('ranking', ranking)
# token_id = torch.argmax(sm)
# token = self.tokenizer.decode([token_id])
# predicted_tokens[i, j] = token
rankings[i, j] = int(ranking)
# print('layer', i, 'position', j, 'top1', token_id, 'actual label', output['token_ids'][j]+1)
# if token_id == self.token_ids[j + 1]:
# token_found_mask[i, j] = 0
input_tokens = [t for t in self.tokens]
output_tokens = [repr(self.tokenizer.decode(t)) for t in watch]
# print('in out', input_tokens, output_tokens)
lm_plots.plot_inner_token_rankings_watch(input_tokens,
output_tokens,
rankings)
if 'printJson' in kwargs and kwargs['printJson']:
data = {'input_tokens': input_tokens,
'output_tokens': output_tokens,
'rankings': rankings}
print(data)
return data
def run_nmf(self, **kwargs):
"""
Run Non-negative Matrix Factorization on network activations of FFNN.
Saves the components in self.components
"""
return NMF(self.activations,
n_input_tokens=self.n_input_tokens,
token_ids=self.token_ids,
_path=self._path,
tokens=self.tokens, **kwargs)
def attention(self, attention_values=None, layer=0, **kwargs):
position = self.n_input_tokens
# importance_id = position - self.n_input_tokens
        importance_id = self.n_input_tokens - 1  # Set first values to first output token
tokens = []
if attention_values:
attn = attention_values
else:
attn = self.attention_values[layer]
# normalize attention heads
attn = attn.sum(axis=1) / attn.shape[1]
for idx, token in enumerate(self.tokens):
# print(idx, attn.shape)
type = "input" if idx < self.n_input_tokens else 'output'
if idx < len(attn[0][importance_id]):
attention_value = attn[0][importance_id][idx].cpu().detach().numpy()
else:
attention_value = 0
tokens.append({'token': token,
'token_id': int(self.token_ids[idx]),
'type': type,
'value': str(attention_value), # because json complains of floats
'position': idx
})
data = {
'tokens': tokens,
'attributions': [att.tolist() for att in attn[0].cpu().detach().numpy()]
}
d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
d.display(d.HTML(filename=os.path.join(self._path, "html", "basic.html")))
viz_id = 'viz_{}'.format(round(random.random() * 1000000))
js = """
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
ecco.interactiveTokens(viz_id, {})
}}, function (err) {{
console.log(err);
}})""".format(data)
d.display(d.Javascript(js))
if 'printJson' in kwargs and kwargs['printJson']:
print(data)
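# Illustrative usage sketch (added, not part of the original source). Assuming
# an ecco-style workflow where a language model wrapper returns an object with
# the methods above after generation, a session might look like:
#
#     output = lm.generate("The countries of the EU are", generate=20)
#     output.layer_predictions(position=6, topk=10)    # top tokens per layer
#     output.rankings()                                 # rankings of chosen tokens
#     output.rankings_watch(watch=[token_id_a, token_id_b], position=6)
#     nmf = output.run_nmf(n_components=8)              # factorize FFNN activations
#
# The exact generate() signature is an assumption and may differ in the
# surrounding library.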
class NMF:
" Conducts NMF and holds the models and components "
def __init__(self, activations: np.ndarray,
n_input_tokens: int = 0,
token_ids: torch.Tensor = torch.Tensor(0),
_path: str = '',
n_components: int = 10,
# from_layer: Optional[int] = None,
# to_layer: Optional[int] = None,
tokens: Optional[List[str]] = None,
**kwargs):
self._path = _path
self.token_ids = token_ids
self.n_input_tokens = n_input_tokens
from_layer = kwargs['from_layer'] if 'from_layer' in kwargs else None
to_layer = kwargs['to_layer'] if 'to_layer' in kwargs else None
if len(activations.shape) != 3:
raise ValueError(f"The 'activations' parameter should have three dimensions: (layers, neurons, positions). "
f"Supplied dimensions: {activations.shape}", 'activations')
if from_layer is not None or to_layer is not None:
from_layer = from_layer if from_layer is not None else 0
to_layer = to_layer if to_layer is not None else activations.shape[0]
if from_layer == to_layer:
raise ValueError(f"from_layer ({from_layer}) and to_layer ({to_layer}) cannot be the same value. "
"They must be apart by at least one to allow for a layer of activations.")
if from_layer > to_layer:
raise ValueError(f"from_layer ({from_layer}) cannot be larger than to_layer ({to_layer}).")
else:
from_layer = 0
to_layer = activations.shape[0]
merged_act = np.concatenate(activations[from_layer: to_layer], axis=0)
activations = np.expand_dims(merged_act, axis=0)
self.tokens = tokens
" Run NMF. Activations is neuron activations shaped (layers, neurons, positions)"
n_output_tokens = activations.shape[-1]
n_layers = activations.shape[0]
n_components = min([n_components, n_output_tokens])
components = np.zeros((n_layers, n_components, n_output_tokens))
models = []
# Get rid of negative activation values
# (There are some, because GPT2 uses GLEU, which allow small negative values)
        activations = np.maximum(activations, 0)
"""Tests for the prediction utilities."""
import numpy as np
import tensorflow as tf
import aboleth as ab
from aboleth.layers import _sample_W
def test_sample_mean():
X = np.arange(10, dtype=float).reshape((2, 5))
true = X.mean(axis=0)
mean = ab.sample_mean(X)
tc = tf.test.TestCase()
with tc.test_session():
assert np.all(true == mean.eval())
def test_sample_quantiles():
X = np.arange(100, dtype=float).reshape((10, 10))
# NOTE: numpy takes lower nearest, tensorflow takes higher nearest when
# equidistant
    true = np.percentile(X, q=[10, 51, 90], axis=0, interpolation='nearest')
import sklearn
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
ground_truth_data_df = pd.DataFrame([
('agde', '<NAME>',13,'M',36930),
('altamira','<NAME>',26,'M',97835),
('amanda', 'Mme <NAME>',17,'F',60381),
('appert', '<NAME>',8,'M',2363),
('castanède','<NAME>',19,'M',64861),
('caylus','<NAME>',24,'M',94378),
('chélan','<NAME>',6,'M',1929),
('croisenois','<NAME>',23,'M',94374),
('danton','<NAME>',1,'M', 15),
('derville','<NAME>',12,'F',13130),
('falcoz','<NAME>',14,'M',45151),
('fervaques','<NAME>',25,'F',96924),
('fouqué','<NAME>',10,'M',7451),
('frilair','<NAME>',15,'M',53833),
('geronimo','<NAME>',16,'M',55797),
('korasoff','<NAME>',27,'M',102772),
('julien','<NAME>',3,'M',4751),
('louise','<NAME>',7,'F',45391),
('maslon','<NAME>',5,'M',1900),
('mathilde','<NAME>',21,'F',90709),
('norbert','<NAME>',20,'M',87123),
('pirard','<NAME>',18,'M',62166),
('rênal','<NAME>',2,'M', 605),
('rênal','<NAME>',7,'F', 2214),
('sorel','<NAME>',3,'M', 940),
('tanbeau','<NAME>',22,'M',92323),
('valenod','<NAME>',4,'M',1724),
('élisa','<NAME>',11,'F',12267),
('mole', '<NAME>', 21,'F',90768),
('mole', '<NAME>',9,'M',2610)],
columns=['name', 'entity','entity_ID', 'gender','first_appearance' ])
def get_clustering_metrics(embeddings, embeddings_type):
'''Given embeddings, and their ground truth data type, computes several clustering performance
metrics. The right `ground_truth_data_df`, `textually_close_ent_ground_truth_df` or
`lax_ent_ground_truth_df` should have been loaded into memory before calling this function.
Parameters
----------
embeddings : dictionary
The dictionary containing each entity and their associated embedding vector
embeddings_type : str
The matching ground truth data type for the given embeddings (either 'first_version',
'textually_close' or 'lax')
Returns
-------
same_entityness : list
A list containing the performance metrics with regards to the 'same_entityness' axis
gender : list
A list containing the performance metrics with regards to the 'gender' axis
first_appearance : list
A list containing the performance metrics with regards to the 'first_appearance' axis
'''
# SAME ENTITY-NESS
same_entityness = []
mask_embs_entity = [(k,
embeddings[k],
ground_truth_data_df[ground_truth_data_df['name'] == k]['entity_ID'].values[0])
for k in embeddings
if k.lower() in ground_truth_data_df['name'].tolist()]
tmp_df = pd.DataFrame(mask_embs_entity)
same_entityness.append(sklearn.metrics.silhouette_score(np.array(tmp_df[1].tolist()),
np.array(tmp_df[2]),
metric='euclidean',
random_state=0))
same_entityness.append(sklearn.metrics.calinski_harabasz_score(np.array(tmp_df[1].tolist()),
np.array(tmp_df[2])))
same_entityness.append(sklearn.metrics.davies_bouldin_score(np.array(tmp_df[1].tolist()),
np.array(tmp_df[2])))
tmp_df = pd.DataFrame(mask_embs_entity)
entityness_matrix = np.array([np.array(emb) for emb in tmp_df[1]])
k_choice = 21 # obtained by the elbow method
kmean = KMeans(n_clusters=k_choice, random_state=0).fit(entityness_matrix, )
predicted_clusters = kmean.predict(np.array([np.array(emb) for emb in tmp_df[1]]))
same_entityness.append(sklearn.metrics.rand_score(np.array(tmp_df[2]), predicted_clusters))
same_entityness.append(sklearn.metrics.adjusted_rand_score(np.array(tmp_df[2]), predicted_clusters))
same_entityness.append(sklearn.metrics.mutual_info_score(np.array(tmp_df[2]), predicted_clusters))
same_entityness.append(sklearn.metrics.adjusted_mutual_info_score(np.array(tmp_df[2]),
predicted_clusters,
average_method='arithmetic'))
# GENDER
gender = []
mask_embs_gender = [(k,
embeddings[k],
ground_truth_data_df[ground_truth_data_df['name'] == k]['gender'].values[0])
for k in embeddings
if k.lower() in ground_truth_data_df['name'].tolist()]
tmp_df = pd.DataFrame(mask_embs_gender)
gender.append(sklearn.metrics.silhouette_score(np.array(tmp_df[1].tolist()),
np.array(tmp_df[2] == 'M').astype(int),
metric='euclidean',
random_state=0))
gender.append(sklearn.metrics.calinski_harabasz_score(np.array(tmp_df[1].tolist()), np.array(tmp_df[2])))
gender.append(sklearn.metrics.davies_bouldin_score(np.array(tmp_df[1].tolist()), np.array(tmp_df[2])))
tmp_df = pd.DataFrame(mask_embs_gender)
gender_matrix = np.array([np.array(emb) for emb in tmp_df[1]])
k_choice = 2 # two genders in PG literature (men and women)
kmean = KMeans(n_clusters=k_choice, random_state=0).fit(gender_matrix)
predicted_clusters = kmean.predict(np.array([np.array(emb) for emb in tmp_df[1]]))
gender.append(sklearn.metrics.rand_score(np.array(tmp_df[2]), predicted_clusters))
    gender.append(sklearn.metrics.adjusted_rand_score(np.array(tmp_df[2]), predicted_clusters))
"""Contains most of the methods that compose the ORIGIN software."""
import itertools
import logging
import warnings
from datetime import datetime
from functools import wraps
from time import time
warnings.filterwarnings("ignore", category=RuntimeWarning)
import matplotlib.pyplot as plt
import numpy as np
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import Gaussian1D
from astropy.nddata import overlap_slices
from astropy.stats import (
gaussian_fwhm_to_sigma,
gaussian_sigma_to_fwhm,
sigma_clipped_stats,
)
from astropy.table import Column, Table, join
from astropy.stats import sigma_clip
from astropy.utils.exceptions import AstropyUserWarning
from joblib import Parallel, delayed
from mpdaf.obj import Image
from mpdaf.tools import progressbar
from numpy import fft
from numpy.linalg import multi_dot
from scipy import fftpack, stats
from scipy.interpolate import interp1d
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.ndimage import label as ndi_label
from scipy.ndimage import maximum_filter
from scipy.signal import fftconvolve
from scipy.sparse.linalg import svds
from scipy.spatial import ConvexHull, cKDTree
from .source_masks import gen_source_mask
__all__ = (
'add_tglr_stat',
'compute_deblended_segmap',
'Compute_GreedyPCA',
'compute_local_max',
'compute_segmap_gauss',
'compute_thresh_gaussfit',
'Compute_threshold_purity',
'compute_true_purity',
'Correlation_GLR_test',
'create_masks',
'estimation_line',
'merge_similar_lines',
'purity_estimation',
'spatial_segmentation',
'spatiospectral_merging',
'unique_sources',
)
def timeit(f):
"""Decorator which prints the execution time of a function."""
@wraps(f)
def timed(*args, **kw):
logger = logging.getLogger(__name__)
t0 = time()
result = f(*args, **kw)
logger.debug('%s executed in %0.1fs', f.__name__, time() - t0)
return result
return timed
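# Illustrative usage sketch (added for clarity, not part of the original
# module): the decorator can wrap any function whose runtime should be logged
# at DEBUG level. The function below is a hypothetical placeholder.
@timeit
def _timeit_example(n=1000000):
    """Dummy workload used only to demonstrate the timeit decorator."""
    return sum(range(n))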
def orthogonal_projection(a, b):
"""Compute the orthogonal projection: a.(a^T.a)-1.a^T.b
NOTE: does not include the (a^T.a)-1 term as it is often not needed (when
a is already normalized).
"""
# Using multi_dot which is faster than np.dot(np.dot(a, a.T), b)
# Another option would be to use einsum, less readable but also very
# fast with Numpy 1.14+ and optimize=True. This seems to be as fast as
# multi_dot.
# return np.einsum('i,j,jk->ik', a, a, b, optimize=True)
if a.ndim == 1:
a = a[:, None]
return multi_dot([a, a.T, b])
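# Illustrative example (added, not from the original source): for a unit-norm
# vector a, orthogonal_projection(a, b) returns a.a^T.b, i.e. the component of
# each column of b along a. The arrays below are arbitrary test values.
def _orthogonal_projection_example():
    rng = np.random.RandomState(0)
    a = rng.randn(5)
    a /= np.linalg.norm(a)  # normalized, so the omitted (a^T.a)^-1 term equals 1
    b = rng.randn(5, 3)
    p = orthogonal_projection(a, b)
    # A projector is idempotent: projecting the projection changes nothing.
    assert np.allclose(orthogonal_projection(a, p), p)
    return p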
@timeit
def spatial_segmentation(Nx, Ny, NbSubcube, start=None):
"""Compute indices to split spatially in NbSubcube x NbSubcube regions.
Each zone is computed from the left to the right and the top to the bottom
First pixel of the first zone has coordinates : (row,col) = (Nx,1).
Parameters
----------
Nx : int
Number of columns
Ny : int
Number of rows
NbSubcube : int
Number of subcubes for the spatial segmentation
start : tuple
if not None, the tupe is the (y,x) starting point
Returns
-------
intx, inty : int, int
limits in pixels of the columns/rows for each zone
"""
# Segmentation of the rows vector in Nbsubcube parts from right to left
inty = np.linspace(Ny, 0, NbSubcube + 1, dtype=np.int)
# Segmentation of the columns vector in Nbsubcube parts from left to right
intx = np.linspace(0, Nx, NbSubcube + 1, dtype=np.int)
if start is not None:
inty += start[0]
intx += start[1]
return inty, intx
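# Illustrative example (added, not from the original source): splitting a
# 300x300 field of view into 3x3 zones. Row limits are returned in descending
# order and column limits in ascending order, as described in the docstring.
def _spatial_segmentation_example():
    inty, intx = spatial_segmentation(Nx=300, Ny=300, NbSubcube=3)
    # inty -> [300, 200, 100, 0] and intx -> [0, 100, 200, 300]
    return inty, intx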
def DCTMAT(nl, order):
"""Return the DCT transformation matrix of size nl-by-(order+1).
Equivalent function to Matlab/Octave's dtcmtx.
https://octave.sourceforge.io/signal/function/dctmtx.html
Parameters
----------
order : int
Order of the DCT (spectral length).
Returns
-------
array: DCT Matrix
"""
yy, xx = np.mgrid[:nl, : order + 1]
D0 = np.sqrt(2 / nl) * np.cos((yy + 0.5) * (np.pi / nl) * xx)
D0[:, 0] *= 1 / np.sqrt(2)
return D0
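# Illustrative example (added, not from the original source): the returned
# matrix has one column per kept DCT atom, and its columns are orthonormal.
def _dctmat_example(nl=100, order=10):
    D0 = DCTMAT(nl, order)
    assert D0.shape == (nl, order + 1)
    assert np.allclose(D0.T @ D0, np.eye(order + 1))
    return D0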
@timeit
def dct_residual(w_raw, order, var, approx, mask):
"""Function to compute the residual of the DCT on raw data.
Parameters
----------
w_raw : array
Data array.
order : int
The number of atom to keep for the DCT decomposition.
var : array
Variance array.
approx : bool
If True, an approximate computation is used, not taking the variance
into account.
Returns
-------
    cont : array
        Continuum estimated from the DCT decomposition. The residual (faint
        emission) is the input data minus this continuum.
"""
nl = w_raw.shape[0]
D0 = DCTMAT(nl, order)
shape = w_raw.shape[1:]
nspec = np.prod(shape)
if approx:
# Compute the DCT transformation, without using the variance.
#
# Given the transformation matrix D0, we compute for each spectrum S:
#
# C = D0.D0^t.S
#
# Old version using tensordot:
# A = np.dot(D0, D0.T)
# cont = np.tensordot(A, w_raw, axes=(0, 0))
# Looping on spectra and using multidot is ~6x faster:
# D0 is typically 3681x11 elements, so it is much more efficient
# to compute D0^t.S first (note the array is reshaped below)
cont = [
multi_dot([D0, D0.T, w_raw[:, y, x]])
for y, x in progressbar(np.ndindex(shape), total=nspec)
]
# For reference, this is identical to the following scipy version,
# though scipy is 2x slower than tensordot (probably because it
# computes all the coefficients)
# from scipy.fftpack import dct
# w = (np.arange(nl) < (order + 1)).astype(int)
# cont = dct(dct(w_raw, type=2, norm='ortho', axis=0) * w[:,None,None],
# type=3, norm='ortho', axis=0, overwrite_x=False)
else:
# Compute the DCT transformation, using the variance.
#
# As the noise differs on each spectral component, we need to take into
# account the (diagonal) covariance matrix Σ for each spectrum S:
#
# C = D0.(D^t.Σ^-1.D)^-1.D0^t.Σ^-1.S
#
w_raw_var = w_raw / var
D0T = D0.T
# Old version (slow):
# def continuum(D0, D0T, var, w_raw_var):
# A = np.linalg.inv(np.dot(D0T / var, D0))
# return np.dot(np.dot(np.dot(D0, A), D0T), w_raw_var)
#
# cont = Parallel()(
# delayed(continuum)(D0, D0T, var[:, i, j], w_raw_var[:, i, j])
# for i in range(w_raw.shape[1]) for j in range(w_raw.shape[2]))
# cont = np.asarray(cont).T.reshape(w_raw.shape)
# map of valid spaxels, i.e. spaxels with at least one valid value
valid = ~np.any(mask, axis=0)
from numpy.linalg import inv
cont = []
for y, x in progressbar(np.ndindex(shape), total=nspec):
if valid[y, x]:
res = multi_dot(
[D0, inv(np.dot(D0T / var[:, y, x], D0)), D0T, w_raw_var[:, y, x]]
)
else:
res = multi_dot([D0, D0.T, w_raw[:, y, x]])
cont.append(res)
return np.stack(cont).T.reshape(w_raw.shape)
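# Illustrative example (added, not from the original source): fitting an
# order-10 DCT continuum to a small synthetic cube in the fast (approximate)
# mode, where the variance and mask arguments are not used.
def _dct_residual_example():
    rng = np.random.RandomState(0)
    cube = rng.randn(200, 4, 4)  # (wavelengths, y, x)
    var = np.ones_like(cube)
    mask = np.zeros_like(cube, dtype=bool)
    cont = dct_residual(cube, order=10, var=var, approx=True, mask=mask)
    faint = cube - cont  # residual after continuum subtraction
    return cont, faint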
def compute_segmap_gauss(data, pfa, fwhm_fsf=0, bins='fd'):
"""Compute segmentation map from an image, using gaussian statistics.
Parameters
----------
data : array
Input values, typically from a O2 test.
pfa : float
Desired false alarm.
    fwhm_fsf : int
        Width (in integer pixels) of the filter, to convolve with a PSF disc.
    bins : str
        Method for computing bins (see numpy.histogram_bin_edges).
Returns
-------
float, array
threshold, and labeled image.
"""
# test threshold : uses a Gaussian approximation of the test statistic
# under H0
histO2, frecO2, gamma, mea, std = compute_thresh_gaussfit(data, pfa, bins=bins)
# threshold - erosion and dilation to clean ponctual "source"
mask = data > gamma
mask = binary_erosion(mask, border_value=1, iterations=1)
mask = binary_dilation(mask, iterations=1)
# convolve with PSF
if fwhm_fsf > 0:
fwhm_pix = int(fwhm_fsf) // 2
size = fwhm_pix * 2 + 1
disc = np.hypot(*list(np.mgrid[:size, :size] - fwhm_pix)) < fwhm_pix
mask = fftconvolve(mask, disc, mode='same')
mask = mask > 1e-9
return gamma, ndi_label(mask)[0]
def compute_deblended_segmap(
image, npixels=5, snr=3, dilate_size=11, maxiters=5, sigma=3, fwhm=3.0, kernelsize=5
):
"""Compute segmentation map using photutils.
The segmentation map is computed with the following steps:
- Creation of a mask of sources with the ``snr`` threshold, using
`photutils.make_source_mask`.
- Estimation of the background statistics with this mask
(`astropy.stats.sigma_clipped_stats`), to estimate a refined threshold
with ``median + sigma * rms``.
- Convolution with a Gaussian kernel.
- Creation of the segmentation image, using `photutils.detect_sources`.
- Deblending of the segmentation image, using `photutils.deblend_sources`.
Parameters
----------
image : mpdaf.obj.Image
The input image.
npixels : int
The number of connected pixels that an object must have to be detected.
snr, dilate_size :
See `photutils.make_source_mask`.
maxiters, sigma :
See `astropy.stats.sigma_clipped_stats`.
fwhm : float
Kernel size (pixels) for the PSF convolution.
kernelsize : int
Size of the convolution kernel.
Returns
-------
`~mpdaf.obj.Image`
The deblended segmentation map.
"""
from astropy.convolution import Gaussian2DKernel
from photutils import make_source_mask, detect_sources
data = image.data
mask = make_source_mask(data, snr=snr, npixels=npixels, dilate_size=dilate_size)
bkg_mean, bkg_median, bkg_rms = sigma_clipped_stats(
data, sigma=sigma, mask=mask, maxiters=maxiters
)
threshold = bkg_median + sigma * bkg_rms
logger = logging.getLogger(__name__)
logger.info(
'Background Median %.2f RMS %.2f Threshold %.2f', bkg_median, bkg_rms, threshold
)
sig = fwhm * gaussian_fwhm_to_sigma
kernel = Gaussian2DKernel(sig, x_size=kernelsize, y_size=kernelsize)
kernel.normalize()
segm = detect_sources(data, threshold, npixels=npixels, filter_kernel=kernel)
segm_deblend = phot_deblend_sources(
image, segm, npixels=npixels, filter_kernel=kernel, mode='linear'
)
return segm_deblend
def phot_deblend_sources(img, segmap, **kwargs):
"""Wrapper to catch warnings from deblend_sources."""
from photutils import deblend_sources
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
category=AstropyUserWarning,
message='.*contains negative values.*',
)
deblend = deblend_sources(img.data, segmap, **kwargs)
return Image(data=deblend.data, wcs=img.wcs, mask=img.mask, copy=False)
def createradvar(cu, ot):
"""Compute the compactness of areas using variance of position.
The variance is computed on the position given by adding one of the 'ot'
to 'cu'.
Parameters
----------
cu : 2D array
The current array
ot : 3D array
The other array
Returns
-------
var : array
The radial variances
"""
N = ot.shape[0]
out = np.zeros(N)
for n in range(N):
tmp = cu + ot[n, :, :]
y, x = np.where(tmp > 0)
r = np.sqrt((y - y.mean()) ** 2 + (x - x.mean()) ** 2)
out[n] = np.var(r)
return out
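# Illustrative example (added, not from the original source): the radial
# variance is smallest for the candidate that keeps the merged region compact.
def _createradvar_example():
    cu = np.zeros((10, 10))
    cu[4:6, 4:6] = 1  # current area: small central square
    ot = np.zeros((2, 10, 10))
    ot[0, 4:6, 6:8] = 1  # candidate adjacent to the current area
    ot[1, 0:2, 0:2] = 1  # candidate far away in a corner
    out = createradvar(cu, ot)
    assert out[0] < out[1]  # merging with the adjacent candidate stays compact
    return out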
def fusion_areas(label, MinSize, MaxSize, option=None):
"""Function which merge areas which have a surface less than
MinSize if the size after merging is less than MaxSize.
The criteria of neighbor can be related to the minimum surface
or to the compactness of the output area
Parameters
----------
label : area
The labels of areas
MinSize : int
The size of areas under which they need to merge
MaxSize : int
The size of areas above which they cant merge
option : string
if 'var' the compactness criteria is used
if None the minimum surface criteria is used
Returns
-------
label : array
The labels of merged areas
"""
while True:
indlabl = np.argsort(np.sum(label, axis=(1, 2)))
tampon = label.copy()
for n in indlabl:
# if the label is not empty
cu = label[n, :, :]
cu_size = np.sum(cu)
if cu_size > 0 and cu_size < MinSize:
# search for neighbors
labdil = label[n, :, :].copy()
labdil = binary_dilation(labdil, iterations=1)
# only neighbors
test = np.sum(label * labdil[np.newaxis, :, :], axis=(1, 2)) > 0
indice = np.where(test == 1)[0]
ind = np.where(indice != n)[0]
indice = indice[ind]
# BOUCLER SUR LES CANDIDATS
ot = label[indice, :, :]
# test size of current with neighbor
if option is None:
test = np.sum(ot, axis=(1, 2))
elif option == 'var':
test = createradvar(cu, ot)
else:
raise ValueError('bad option')
if len(test) > 0:
# keep the min-size
ind = np.argmin(test)
cand = indice[ind]
if (np.sum(label[n, :, :]) + test[ind]) < MaxSize:
label[n, :, :] += label[cand, :, :]
label[cand, :, :] = 0
# clean empty area
ind = np.sum(label, axis=(1, 2)) > 0
label = label[ind, :, :]
tampon = tampon[ind, :, :]
if np.sum(tampon - label) == 0:
break
return label
@timeit
def area_segmentation_square_fusion(nexpmap, MinS, MaxS, NbSubcube, Ny, Nx):
"""Create non square area based on continuum test.
The full 2D image is first segmented in subcube. The area are fused in case
they are too small. Thanks to the continuum test, detected sources are
fused with associated area. The convex enveloppe of the sources inside each
area is then done. Finally all the convex enveloppe growth until using all
the pixels
Parameters
----------
nexpmap : 2D array
the active pixel of the image
MinS : int
The size of areas under which they need to merge
MaxS : int
The size of areas above which they cant merge
NbSubcube : int
Number of subcubes for the spatial segmentation
Nx : int
Number of columns
Ny : int
Number of rows
Returns
-------
label : array
label of the fused square
"""
# square area index with borders
Vert = np.sum(nexpmap, axis=1)
Hori = np.sum(nexpmap, axis=0)
y1 = np.where(Vert > 0)[0][0]
x1 = np.where(Hori > 0)[0][0]
y2 = Ny - np.where(Vert[::-1] > 0)[0][0]
x2 = Nx - np.where(Hori[::-1] > 0)[0][0]
start = (y1, x1)
inty, intx = spatial_segmentation(Nx, Ny, NbSubcube, start=start)
# % FUSION square AREA
label = []
for numy in range(NbSubcube):
for numx in range(NbSubcube):
y1, y2, x1, x2 = inty[numy + 1], inty[numy], intx[numx], intx[numx + 1]
tmp = nexpmap[y1:y2, x1:x2]
if np.mean(tmp) != 0:
labtest = ndi_label(tmp)[0]
labtmax = labtest.max()
for n in range(labtmax):
label_tmp = np.zeros((Ny, Nx))
label_tmp[y1:y2, x1:x2] = labtest == (n + 1)
label.append(label_tmp)
label = np.array(label)
return fusion_areas(label, MinS, MaxS)
@timeit
def area_segmentation_sources_fusion(labsrc, label, pfa, Ny, Nx):
"""Function to create non square area based on continuum test. Thanks
to the continuum test, detected sources are fused with associated area.
The convex enveloppe of the sources inside
each area is then done. Finally all the convex enveloppe growth until
using all the pixels
Parameters
----------
labsrc : array
segmentation map
label : array
label of fused square generated in area_segmentation_square_fusion
pfa : float
Pvalue for the test which performs segmentation
NbSubcube : int
Number of subcubes for the spatial segmentation
Nx : int
Number of columns
Ny : int
Number of rows
Returns
-------
label_out : array
label of the fused square and sources
"""
# compute the sources label
nlab = labsrc.max()
sources = np.zeros((nlab, Ny, Nx))
for n in range(1, nlab + 1):
sources[n - 1, :, :] = (labsrc == n) > 0
sources_save = sources.copy()
nlabel = label.shape[0]
nsrc = sources.shape[0]
for n in range(nsrc):
cu_src = sources[n, :, :]
# find the area in which the current source
# has bigger probability to be
test = np.sum(cu_src[np.newaxis, :, :] * label, axis=(1, 2))
if len(test) > 0:
ind = np.argmax(test)
# associate the source to the label
label[ind, :, :] = (label[ind, :, :] + cu_src) > 0
# mask other labels from this sources
mask = (1 - label[ind, :, :])[np.newaxis, :, :]
ot_lab = np.delete(np.arange(nlabel), ind)
label[ot_lab, :, :] *= mask
# delete the source
sources[n, :, :] = 0
return label, np.sum(sources_save, axis=0)
@timeit
def area_segmentation_convex_fusion(label, src):
"""Function to compute the convex enveloppe of the sources inside
each area is then done. Finally all the convex enveloppe growth until
using all the pixels
Parameters
----------
label : array
label containing the fusion of fused squares and sources
generated in area_segmentation_sources_fusion
src : array
label of estimated sources from segmentation map
Returns
-------
label_out : array
label of the convex
"""
label_fin = []
# for each label
for lab_n in range(label.shape[0]):
# keep only the sources inside the label
lab = label[lab_n, :, :]
data = src * lab
if np.sum(data > 0):
points = np.array(np.where(data > 0)).T
y_0 = points[:, 0].min()
x_0 = points[:, 1].min()
points[:, 0] -= y_0
points[:, 1] -= x_0
sny, snx = points[:, 0].max() + 1, points[:, 1].max() + 1
# compute the convex enveloppe of a sub part of the label
lab_temp = Convexline(points, snx, sny)
# in full size
label_out = np.zeros((label.shape[1], label.shape[2]))
label_out[y_0 : y_0 + sny, x_0 : x_0 + snx] = lab_temp
label_out *= lab
label_fin.append(label_out)
return np.array(label_fin)
def Convexline(points, snx, sny):
"""Function to compute the convex enveloppe of the sources inside
each area is then done and full the polygone
Parameters
----------
    points : array
        Positions of the sources for one of the labels
snx,sny: int,int
the effective size of area in the label
Returns
-------
lab_out : array
The filled convex enveloppe corresponding the sub label
"""
# convex enveloppe vertices
hull = ConvexHull(points)
xs = hull.points[hull.simplices[:, 1]]
xt = hull.points[hull.simplices[:, 0]]
sny, snx = points[:, 0].max() + 1, points[:, 1].max() + 1
tmp = np.zeros((sny, snx))
    # create the line between vertices
for n in range(hull.simplices.shape[0]):
x0, x1, y0, y1 = xs[n, 1], xt[n, 1], xs[n, 0], xt[n, 0]
        nx = np.abs(x1 - x0)
# -*- coding: utf-8 -*-
# Copyright 2019 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IBM-Review-Requirement: Art30.3
# Please note that the following code was developed for the project VaVeL at IBM Research
# -- Ireland, funded by the European Union under the Horizon 2020 Program.
# The project started on December 1st, 2015 and was completed by December 1st,
# 2018. Thus, in accordance with Article 30.3 of the Multi-Beneficiary General
# Model Grant Agreement of the Program, the above limitations are in force.
# For further details please contact <NAME> (<EMAIL>),
# or <NAME> (<EMAIL>).
# If you use this code, please cite our paper:
# @inproceedings{kozdoba2018,
# title={On-Line Learning of Linear Dynamical Systems: Exponential Forgetting in Kalman Filters},
# author={Kozdoba, <NAME> Marecek, <NAME> and <NAME>},
# booktitle = {The Thirty-Third AAAI Conference on Artificial Intelligence (AAAI-19)},
# note={arXiv preprint arXiv:1809.05870},
# year={2019}
#}
from __future__ import print_function
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import scipy.optimize as opt
import numpy as np
import rlcompleter
from sklearn.metrics import f1_score
import time
import timeit
import math
# debugging
import pdb
pdb.Pdb.complete=rlcompleter.Completer(locals()).complete
import traceback
# Matlab loading
import tables
from scipy.io import loadmat
verbose = False
from onlinelds import *
from inputlds import *
def close_all_figs():
plt.close('all')
def testIdentification(sys, filenameStub = "test", noRuns = 2, T = 100, k = 5, etaZeros = None, ymin = None, ymax = None, sequenceLabel = None, haveSpectral = True):
""" noRuns is the number of runs, T is the time horizon, k is the number of filters, """
if k>T:
print("Number of filters (k) must be less than or equal to the number of time-steps (T).")
exit()
if not etaZeros:
etaZeros = [1.0, 2500.0]
print("etaZeros:")
print(etaZeros)
filename = './outputs/' + filenameStub+'.pdf'
pp = PdfPages(filename)
error_AR_data = None
error_spec_data = None
error_persist_data = None
for i in range(noRuns):
print("run %i" % i)
inputs = np.zeros(T)
sys.solve([[1],[0]],inputs,T)
if haveSpectral:
predicted_spectral, M, error_spec, error_persist = wave_filtering_SISO_ftl(sys, T, k)
if error_spec_data is None: error_spec_data = error_spec
else: error_spec_data = np.vstack((error_spec_data, error_spec))
if error_persist_data is None: error_persist_data = error_persist
else: error_persist_data = np.vstack((error_persist_data, error_persist))
for etaZero in etaZeros:
error_AR = np.zeros(T)
predicted_AR = np.zeros(T)
s=2
D=1.
theta = [0 for i in range(s)]
for t in range(s,T):
eta = pow(float(t),-0.5) / etaZero
Y = sys.outputs[t]
loss = cost_AR(theta, Y, list(reversed(sys.outputs[t-s:t])))
error_AR[t] = pow(loss, 0.5)
grad = gradient_AR(theta, Y, list(reversed(sys.outputs[t-s:t])))
#print("Loss: at time step %d :" % (t), loss)
theta = [theta[i] -eta*grad[i] for i in range(len(theta))] #gradient step
norm_theta = np.linalg.norm(theta)
if norm_theta>D: theta = [D*i/norm_theta for i in theta] #projection step
predicted_AR[t] = np.dot(list(reversed(sys.outputs[t-s:t])),theta)
if error_AR_data is None: error_AR_data = error_AR
else: error_AR_data = np.vstack((error_AR_data, error_AR))
p1 = plt.figure()
if ymax and ymin: plt.ylim(ymin, ymax)
if sum(inputs[1:]) > 0: plt.plot(inputs[1:], label='Input')
if sequenceLabel: plt.plot([float(i) for i in sys.outputs][1:], label=sequenceLabel, color='#000000', linewidth=2, antialiased = True)
else: plt.plot([float(i) for i in sys.outputs][1:], label='Output', color='#000000', linewidth=2, antialiased = True)
#plt.plot([-i for i in predicted_output], label='Predicted output') #for some reason, usual way produces -ve estimate
if haveSpectral:
plt.plot([i for i in predicted_spectral], label='Spectral')
#lab = 'AR(3) / OGD, c_0 = ' + str(etaZero)
lab = "AR(" + str(s) + "), c = " + str(int(etaZero))
plt.plot(predicted_AR, label = lab)
plt.legend()
plt.xlabel('Time')
plt.ylabel('Output')
p1.show()
p1.savefig(pp, format='pdf')
p2 = plt.figure()
plt.ylim(0, 20)
if haveSpectral:
plt.plot(error_spec, label='Spectral')
plt.plot(error_persist, label='Persistence')
plt.plot(error_AR, label=lab)
plt.legend()
p2.show()
plt.xlabel('Time')
plt.ylabel('Error')
p2.savefig(pp, format='pdf')
error_AR_mean = np.mean(error_AR_data, 0)
error_AR_std = np.std(error_AR_data, 0)
if haveSpectral:
error_spec_mean = np.mean(error_spec_data, 0)
error_spec_std = np.std(error_spec_data, 0)
error_persist_mean = np.mean(error_persist_data, 0)
error_persist_std = np.std(error_persist_data, 0)
p3 = plt.figure()
if ymax and ymin: plt.ylim(ymin, ymax)
if haveSpectral:
plt.plot(error_spec_mean, label='Spectral', color='#1B2ACC', linewidth=2, antialiased = True)
plt.fill_between(range(0,T-1), error_spec_mean-error_spec_std, error_spec_mean+error_spec_std, alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF',
linewidth=1, antialiased=True)
plt.plot(error_persist_mean, label='Persistence', color='#CC1B2A', linewidth=2, antialiased = True)
plt.fill_between(range(0,T-1), error_persist_mean-error_persist_std, error_persist_mean+error_persist_std, alpha=0.2, edgecolor='#CC1B2A', facecolor='#FF0800',
linewidth=1, antialiased=True)
cAR1 = (42.0/255, 204.0 / 255.0, 1.0/255)
bAR1 = (1.0, 204.0 / 255.0, 0.0) # , alphaValue
plt.ylim(0, 20)
plt.plot(error_AR_mean, label='AR(3)', color=cAR1, linewidth=2, antialiased = True)
plt.fill_between(range(0,T), error_AR_mean-error_AR_std, error_AR_mean+error_AR_std, alpha=0.2, edgecolor=cAR1, facecolor=bAR1,
linewidth=1, antialiased=True)
plt.legend()
plt.xlabel('Time')
plt.ylabel('Error')
p3.savefig(pp, format='pdf')
pp.close()
print("See the output in " + filename)
def testIdentification2(T = 100, noRuns = 10, sChoices = [15,3,1], haveKalman = False, haveSpectral = True, G = np.matrix([[0.999,0],[0,0.5]]), F_dash = np.matrix([[1,1]]), sequenceLabel = ""):
if haveKalman: sChoices = sChoices + [T]
if len(sequenceLabel) > 0: sequenceLabel = " (" + sequenceLabel + ")"
if noRuns < 2:
print("Number of runs has to be larger than 1.")
exit()
filename = './outputs/AR.pdf'
pp = PdfPages(filename)
################# SYSTEM ###################
proc_noise_std = 0.5
obs_noise_std = 0.5
error_spec_data = None
error_persist_data = None
error_AR1_data = None
error_Kalman_data = None
for runNo in range(noRuns):
sys = dynamical_system(G,np.zeros((2,1)),F_dash,np.zeros((1,1)),
process_noise='gaussian',
observation_noise='gaussian',
process_noise_std=proc_noise_std,
observation_noise_std=obs_noise_std,
timevarying_multiplier_b = None)
inputs = np.zeros(T)
sys.solve([[1],[1]],inputs,T)
Y = [i[0,0] for i in sys.outputs]
#pdb.set_trace()
############################################
########## PRE-COMPUTE FILTER PARAMS ###################
n = G.shape[0]
m = F_dash.shape[0]
W = proc_noise_std**2 * np.matrix(np.eye(n))
V = obs_noise_std**2 * np.matrix(np.eye(m))
#m_t = [np.matrix([[0],[0]])]
C = [np.matrix(np.eye(2))]
R = []
Q = []
A = []
Z = []
for t in range(T):
R.append(G * C[-1] * G.transpose() + W)
Q.append(F_dash * R[-1] * F_dash.transpose() + V)
A.append(R[-1]*F_dash.transpose()*np.linalg.inv(Q[-1]))
C.append(R[-1] - A[-1]*Q[-1]*A[-1].transpose() )
Z.append(G*( np.eye(2) - A[-1] * F_dash ))
#PREDICTION
plt.plot(Y, label='Output', color='#000000', linewidth=2, antialiased = True)
for s in sChoices:
Y_pred=[]
for t in range(T):
Y_pred_term1 = F_dash * G * A[t] * sys.outputs[t]
if t==0:
Y_pred.append(Y_pred_term1)
continue
acc = 0
for j in range(min(t,s)+1):
for i in range(j+1):
if i==0:
ZZ=Z[t-i]
continue
ZZ = ZZ*Z[t-i]
acc += ZZ * G * A[t-j-1] * Y[t-j-1]
Y_pred.append(Y_pred_term1 + F_dash*acc)
#print(np.linalg.norm([Y_pred[i][0,0] - Y[i] for i in range(len(Y))]))
#print(lab)
if s == 1:
if error_AR1_data is None: error_AR1_data = np.array([pow(np.linalg.norm(Y_pred[i][0,0] - Y[i]), 2) for i in range(len(Y))])
else:
#print(error_AR1_data.shape)
error_AR1_data = np.vstack((error_AR1_data, [pow(np.linalg.norm(Y_pred[i][0,0] - Y[i]), 2) for i in range(len(Y))]))
if s == T:
# For the spectral filtering etc, we use: loss = pow(np.linalg.norm(sys.outputs[t] - y_pred), 2)
if error_Kalman_data is None: error_Kalman_data = np.array([pow(np.linalg.norm(Y_pred[i][0,0] - Y[i]), 2) for i in range(len(Y))])
else: error_Kalman_data = np.vstack((error_Kalman_data, [pow(np.linalg.norm(Y_pred[i][0,0] - Y[i]), 2) for i in range(len(Y))]))
plt.plot([i[0,0] for i in Y_pred], label="Kalman" + sequenceLabel, color=(42.0/255.0, 204.0 / 255.0, 200.0/255.0), linewidth=2, antialiased = True)
else:
plt.plot([i[0,0] for i in Y_pred], label='AR(%i)' % (s+1) + sequenceLabel, color=(42.0/255.0, 204.0 / 255.0, float(min(255.0,s))/255.0), linewidth=2, antialiased = True)
plt.xlabel('Time')
plt.ylabel('Prediction')
if haveSpectral:
predicted_output, M, error_spec, error_persist = wave_filtering_SISO_ftl(sys, T, 5)
plt.plot(predicted_output, label='Spectral' + sequenceLabel, color='#1B2ACC', linewidth=2, antialiased = True)
if error_spec_data is None: error_spec_data = error_spec
else: error_spec_data = np.vstack((error_spec_data, error_spec))
if error_persist_data is None: error_persist_data = error_persist
else: error_persist_data = np.vstack((error_persist_data, error_persist))
plt.legend()
plt.savefig(pp, format='pdf')
plt.close('all')
#plt.show()
if haveSpectral:
error_spec_mean = np.mean(error_spec_data, 0)
error_spec_std = np.std(error_spec_data, 0)
error_persist_mean = np.mean(error_persist_data, 0)
error_persist_std = np.std(error_persist_data, 0)
error_AR1_mean = np.mean(error_AR1_data, 0)
error_AR1_std = np.std(error_AR1_data, 0)
if haveKalman:
error_Kalman_mean = np.mean(error_Kalman_data, 0)
error_Kalman_std = np.std(error_Kalman_data, 0)
for (ylim, alphaValue) in [((0, 100.0), 0.2), ((0.0, 1.0), 0.05)]:
for Tlim in [T-1, min(T-1, 20)]:
#p3 = plt.figure()
p3, ax = plt.subplots()
plt.ylim(ylim)
if haveSpectral:
plt.plot(range(0,Tlim), error_spec[:Tlim], label='Spectral' + sequenceLabel, color='#1B2ACC', linewidth=2, antialiased = True)
plt.fill_between(range(0,Tlim), (error_spec_mean-error_spec_std)[:Tlim], (error_spec_mean+error_spec_std)[:Tlim], alpha=alphaValue, edgecolor='#1B2ACC', facecolor='#089FFF', linewidth=1, antialiased=True)
plt.plot(range(0,Tlim), error_persist[:Tlim], label='Persistence' + sequenceLabel, color='#CC1B2A', linewidth=2, antialiased = True)
plt.fill_between(range(0,Tlim), (error_persist_mean-error_persist_std)[:Tlim], (error_persist_mean+error_persist_std)[:Tlim], alpha=alphaValue, edgecolor='#CC1B2A', facecolor='#FF0800', linewidth=1, antialiased=True)
#import matplotlib.transforms as mtransforms
#trans = mtransforms.blended_transform_factory(ax.transData, ax.transData)
#trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
cAR1 = (42.0/255, 204.0 / 255.0, 1.0/255)
bAR1 = (1.0, 204.0 / 255.0, 0.0) # , alphaValue
print(cAR1)
print(bAR1)
#print(error_AR1_data)
#print(error_AR1_mean)
#print(Tlim)
plt.plot(error_AR1_mean[:Tlim], label='AR(2)' + sequenceLabel, color=cAR1, linewidth=2, antialiased = True)
plt.fill_between(range(0,Tlim), (error_AR1_mean-error_AR1_std)[:Tlim], (error_AR1_mean+error_AR1_std)[:Tlim], alpha=alphaValue, edgecolor=cAR1, facecolor=bAR1, linewidth=1, antialiased=True) #transform=trans) #offset_position="data") alpha=alphaValue,
if haveKalman:
cK = (42.0/255.0, 204.0 / 255.0, 200.0/255.0)
bK = (1.0, 204.0 / 255.0, 200.0/255.0) # alphaValue
print(cK)
print(bK)
plt.plot(error_Kalman_mean[:Tlim], label='Kalman' + sequenceLabel, color=cK, linewidth=2, antialiased = True)
plt.fill_between(range(0,Tlim), (error_Kalman_mean-error_Kalman_std)[:Tlim], (error_Kalman_mean+error_Kalman_std)[:Tlim], alpha=alphaValue, facecolor=bK, edgecolor=cK, linewidth=1, antialiased=True) # transform = trans) #offset_position="data")
plt.legend()
plt.xlabel('Time')
plt.ylabel('Error')
#p3.show()
p3.savefig(pp, format='pdf')
pp.close()
# This is taken from pyplot documentation
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Arguments:
data : A 2D numpy array of shape (N,M)
row_labels : A list or array of length N with the labels
for the rows
col_labels : A list or array of length M with the labels
for the columns
Optional arguments:
ax : A matplotlib.axes.Axes instance to which the heatmap
is plotted. If not provided, use current axes or
create a new one.
cbar_kw : A dictionary with arguments to
:meth:`matplotlib.Figure.colorbar`.
cbarlabel : The label for the colorbar
All other arguments are directly passed on to the imshow call.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False,
labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
rotation_mode="anchor")
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im, cbar
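# Illustrative example (added, not from the original script): rendering a small
# random matrix with the heatmap helper above. Labels and data are arbitrary.
def _heatmap_example():
    data = np.random.rand(4, 6)
    rows = ["r%d" % i for i in range(4)]
    cols = ["c%d" % j for j in range(6)]
    fig, ax = plt.subplots()
    im, cbar = heatmap(data, rows, cols, ax=ax, cmap="viridis", cbarlabel="value")
    fig.tight_layout()
    return fig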
def testNoiseImpact(T = 50, noRuns = 10, discretisation = 10):
filename = './outputs/noise.pdf'
pp = PdfPages(filename)
for s in [1, 2, 3, 7]:
        data = np.zeros((discretisation, discretisation))
import unittest
from functools import partial
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
## RELIEF ALGORITHM IMPLEMENTATION UNIT TESTS ##########
from algorithms.relief import Relief
class TestRelief(unittest.TestCase):
# Test initialization with default parameters.
def test_init_default(self):
relief = Relief()
self.assertEqual(relief.n_features_to_select, 10)
self.assertEqual(relief.m, -1)
self.assertNotEqual(relief.dist_func, None)
self.assertEqual(relief.learned_metric_func, None)
# Test initialization with explicit parameters.
def test_init_custom(self):
relief = Relief(n_features_to_select=15, m=80, dist_func=lambda x1, x2: np.sum(np.abs(x1-x2), 1), learned_metric_func = lambda x1, x2: np.sum(np.abs(x1-x2), 1))
self.assertEqual(relief.n_features_to_select, 15)
self.assertEqual(relief.m, 80)
self.assertNotEqual(relief.dist_func, None)
self.assertNotEqual(relief.learned_metric_func, None)
# Test update of feature weights.
def test_weights_update(self):
relief = Relief()
# Initialize parameter values.
data = np.array([[2.09525, 0.26961, 3.99627],
[9.86248, 6.22487, 8.77424],
[7.03015, 9.24269, 3.02136],
[8.95009, 8.52854, 0.16166],
[3.41438, 4.03548, 7.88157],
[2.01185, 0.84564, 6.16909],
[2.79316, 1.71541, 2.97578],
[3.22177, 0.16564, 5.79036],
[1.81406, 2.74643, 2.13259],
[4.77481, 8.01036, 7.57880]])
target = np.array([1, 2, 2, 2, 1, 1, 3, 3, 3, 1])
e = data[2, :] # The third example
closest_same = data[3, :] # Closest example from same class
closest_other = data[9, :] # Closest example from different class
weights = np.ones(data.shape[1]) # Current feature weights
m = data.shape[0] # Number of examples to sample
max_f_vals = np.max(data, 0) # Max value of each feature
min_f_vals = np.min(data, 0) # Min value of each feature
# Compute weights update
res = relief._update_weights(data, e, closest_same, closest_other, weights, m, max_f_vals, min_f_vals)
# Compare with results computed by hand.
correct_res = np.array([1.004167277552613, 1.0057086828870614, 1.01971232778099])
assert_array_almost_equal(res, correct_res, decimal=5)
# Test relief algorithm
def test_relief(self):
# training examples
data = np.array([[2.09525, 0.26961, 3.99627],
[9.86248, 6.22487, 8.77424],
[7.03015, 9.24269, 3.02136],
[4.77481, 8.01036, 7.57880]])
# class values
target = np.array([1, 2, 2, 1])
relief = Relief(n_features_to_select=2, m=data.shape[0])
relief = relief.fit(data, target)
# Get results of methods.
res_rank = relief.rank
res_weights = relief.weights
# Compare with results computed by hand.
correct_res_rank = np.array([1, 2, 3])
correct_res_weights = np.array([0.11295758, -0.23107757, -0.32095185])
assert_array_almost_equal(res_rank, correct_res_rank)
assert_array_almost_equal(res_weights, correct_res_weights)
########################################################
## RELIEFF ALGORITHM IMPLEMENTATION UNIT TESTS #########
from algorithms.relieff import Relieff
class TestRelieff(unittest.TestCase):
# Test initialization with default parameters.
def test_init_default(self):
relieff = Relieff()
self.assertEqual(relieff.n_features_to_select, 10)
self.assertEqual(relieff.m, -1)
self.assertEqual(relieff.k, 5)
self.assertNotEqual(relieff.dist_func, None)
self.assertEqual(relieff.learned_metric_func, None)
# Test initialization with explicit parameters.
def test_init_custom(self):
relieff = Relieff(n_features_to_select=15, m=80, k=3, dist_func=lambda x1, x2: np.sum(np.abs(x1-x2), 1), learned_metric_func = lambda x1, x2: np.sum(np.abs(x1-x2), 1))
self.assertEqual(relieff.n_features_to_select, 15)
self.assertEqual(relieff.m, 80)
self.assertEqual(relieff.k, 3)
self.assertNotEqual(relieff.dist_func, None)
self.assertNotEqual(relieff.learned_metric_func, None)
# Test update of feature weights.
def test_weights_update(self):
relieff = Relieff(k=2)
# Initialize parameter values.
data = np.array([[2.09525, 0.26961, 3.99627],
[9.86248, 6.22487, 8.77424],
[7.03015, 9.24269, 3.02136],
[8.95009, 8.52854, 0.16166],
[3.41438, 4.03548, 7.88157],
[2.01185, 0.84564, 6.16909],
[2.79316, 1.71541, 2.97578],
[3.22177, 0.16564, 5.79036],
[1.81406, 2.74643, 2.13259],
[4.77481, 8.01036, 7.57880]])
target = np.array([1, 2, 2, 2, 1, 1, 3, 3, 3, 1])
e = data[2, :] # The third example.
closest_same = np.vstack((data[3, :], data[1, :])) # Closest examples from same class
closest_other = np.vstack((data[9, :], data[4, :], data[6, :], data[8, :])) # Closest example from different classes
weights = np.zeros(data.shape[1]) # Feature weights
weights_mult = np.array([0.4/0.7, 0.4/0.7, 0.3/0.7, 0.3/0.7]) # Weights multipliers
m = data.shape[0] # number of examples to sample
k = 2 # Number of examples from each class to take.
max_f_vals = np.max(data, 0) # Max value of each feature
min_f_vals = np.min(data, 0) # Min value of each feature
# Compute weights update
res = relieff._update_weights(data, e[np.newaxis], closest_same, closest_other, weights[np.newaxis], weights_mult[np.newaxis].T, m, k, max_f_vals[np.newaxis], min_f_vals[np.newaxis])
# Compare with results computed by hand.
correct_res = np.array([0.01648752, 0.03281824, -0.01643311])
        assert_array_almost_equal(res, correct_res, decimal=5)
import json
import numpy as np
from matplotlib import pyplot as plt
from pathlib import Path
mainPath = Path('/yourpathhere/')
folderPath5 = mainPath.joinpath('MNIST_5Dim')
folderPath50 = mainPath.joinpath('MNIST_50Dim')
folderPath75 = mainPath.joinpath('MNIST_75Dim')
folderPath100 = mainPath.joinpath('MNIST_100Dim')
filePath5 = folderPath5.joinpath('losses_and_nfes.json')
filePath50 = folderPath50.joinpath('losses_and_nfes.json')
filePath75 = folderPath75.joinpath('losses_and_nfes.json')
filePath100 = folderPath100.joinpath('losses_and_nfes.json')
with open(filePath5) as f:
d = json.load(f)
print(d)
dicAnode = d[0]
dicNode = d[1]
accuracyAnode5 = np.array(dicAnode['epoch_accuracy_history'])
nfeAnode5 = np.array(dicAnode['epoch_total_nfe_history'])
lossAnode5 = np.array(dicAnode['epoch_loss_history'])
with open(filePath50) as f:
d = json.load(f)
print(d)
dicAnode = d[0]
accuracyAnode50 = np.array(dicAnode['epoch_accuracy_history'])
nfeAnode50 = np.array(dicAnode['epoch_total_nfe_history'])
lossAnode50 = np.array(dicAnode['epoch_loss_history'])
with open(filePath75) as f:
d = json.load(f)
print(d)
dicAnode = d[0]
    accuracyAnode75 = np.array(dicAnode['epoch_accuracy_history'])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
project: https://github.com/charnley/rmsd
license: https://github.com/charnley/rmsd/blob/master/LICENSE
"""
import copy
import os
import sys
import unittest
from contextlib import contextmanager
import numpy as np
import rmsd
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class TestRMSD(unittest.TestCase):
"""Test the DSSP parser methods."""
def setUp(self):
"""Initialize the framework for testing."""
abs_path = os.path.abspath(os.path.dirname(__file__))
self.xyzpath = abs_path + "/tests/"
self.centroid = rmsd.centroid
self.rmsd = rmsd.rmsd
self.get_coordinates = rmsd.get_coordinates
self.get_coordinates_pdb = rmsd.get_coordinates_pdb
self.get_coordinates_xyz = rmsd.get_coordinates_xyz
self.get_coordinates_ase = rmsd.get_coordinates_ase
self.parse_periodic_case = rmsd.parse_periodic_case
self.kabsch_rmsd = rmsd.kabsch_rmsd
self.kabsch_rotate = rmsd.kabsch_rotate
self.kabsch_algo = rmsd.kabsch
self.quaternion_rmsd = rmsd.quaternion_rmsd
self.quaternion_rotate = rmsd.quaternion_rotate
self.quaternion_transform = rmsd.quaternion_transform
self.makeQ = rmsd.makeQ
self.makeW = rmsd.makeW
self.print_coordinates = rmsd.print_coordinates
self.reorder_brute = rmsd.reorder_brute
self.reorder_hungarian = rmsd.reorder_hungarian
self.reorder_distance = rmsd.reorder_distance
self.check_reflections = rmsd.check_reflections
def tearDown(self):
"""Clear the testing framework."""
self.xyzpath = None
self.centroid = None
self.rmsd = None
self.get_coordinates = None
self.get_coordinates_pdb = None
self.get_coordinates_xyz = None
self.kabsch_rmsd = None
self.kabsch_rotate = None
self.kabsch_algo = None
self.quaternion_rmsd = None
self.quaternion_rotate = None
self.quaternion_transform = None
self.makeQ = None
self.makeW = None
self.print_coordinates = None
self.reorder_brute = None
self.reorder_hungarian = None
self.reorder_distance = None
self.check_reflections = None
def assertListAlmostEqual(self, list1, list2, places):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, places=places)
def test_get_coordinates_pdb(self):
infile = self.xyzpath + 'ci2_1.pdb'
coords = self.get_coordinates_pdb(infile)
self.assertEqual('N', coords[0][0])
self.assertEqual([-7.173, -13.891, -6.266], coords[1][0].tolist())
def test_get_coordinates_xyz(self):
infile = self.xyzpath + 'ethane.xyz'
coords = self.get_coordinates_xyz(infile)
self.assertEqual('C', coords[0][0])
self.assertEqual([-0.98353, 1.81095, -0.0314], coords[1][0].tolist())
def test_get_coordinates(self):
infile = self.xyzpath + 'ci2_1.pdb'
coords = self.get_coordinates(infile, 'pdb')
self.assertEqual('N', coords[0][0])
self.assertEqual([-7.173, -13.891, -6.266], coords[1][0].tolist())
infile = self.xyzpath + 'ethane.xyz'
coords = self.get_coordinates(infile, 'xyz')
self.assertEqual('C', coords[0][0])
self.assertEqual([-0.98353, 1.81095, -0.0314], coords[1][0].tolist())
def test_centroid(self):
a1 = np.array([-19.658, 17.18, 25.163], dtype=float)
a2 = np.array([-20.573, 18.059, 25.88], dtype=float)
a3 = np.array([-22.018, 17.551, 26.0], dtype=float)
        atms = np.asarray([a1, a2, a3])
# SIMPLIFIED PERFORMANCE MODEL v1
import math
from tixi import tixiwrapper
import numpy
# 08.08.2018 - Script creation
# ----------------------------------------------------------------------------------------------------------------------
# PRE-PROCESSING (INPUTS)
Sigma = 1 # [-] ratio among air density at SL and air density of the airfield
g = 9.81 # [m/s^2] gravitational acceleration
k_descent = 0.47 # [-] power ratio in descent (from Artur's Engine Deck)
k_landing = 0.13 # [-] power ratio in landing (from Artur's Engine Deck)
# CPACS
INFILE = './ToolInput/toolInput.xml'
tixi_h = tixiwrapper.Tixi()
tixi_h.open(INFILE)
# MTOM
MTOM = tixi_h.getDoubleElement('/cpacs/vehicles/aircraft/model/analyses/massBreakdown/designMasses/mTOM/mass')
# Wing area
S = tixi_h.getDoubleElement('/cpacs/vehicles/aircraft/model/reference/area')
# TOFL
TO_length = tixi_h.getDoubleElement('/cpacs/toolspecific/SimplifiedPerformanceModel/Inputs/TOFL')
# Cl @ takeoff
Cl_takeoff = tixi_h.getDoubleElement('/cpacs/toolspecific/SimplifiedPerformanceModel/Inputs/Cl_takeoff')
# Number of engines
N_engines = tixi_h.getDoubleElement('/cpacs/toolspecific/SimplifiedPerformanceModel/Inputs/NumberEngines')
# Speed during climb
V_climb = tixi_h.getDoubleElement('/cpacs/toolspecific/SimplifiedPerformanceModel/Inputs/V_climb')
# Vertical speed
Vvert = tixi_h.getDoubleElement('/cpacs/toolspecific/SimplifiedPerformanceModel/Inputs/VerticalSpeed')
# Altitude climb
Altitude_climb = tixi_h.getDoubleElement('/cpacs/toolspecific/SimplifiedPerformanceModel/Inputs/AltitudeClimb')
# Altitude cruise
Altitude_cruise = tixi_h.getDoubleElement('/cpacs/toolspecific/SimplifiedPerformanceModel/Inputs/AltitudeCruise')
# Cruise speed
V_cruise = tixi_h.getDoubleElement('/cpacs/toolspecific/SimplifiedPerformanceModel/Inputs/SpeedCruise')
# GET CL and CD from CPACS
XpathAeroPerformanceMap = '/cpacs/vehicles/aircraft/model/analyses/aeroPerformanceMap'
nMN = tixi_h.getVectorSize(XpathAeroPerformanceMap+'/machNumber')
nRN = tixi_h.getVectorSize(XpathAeroPerformanceMap+'/reynoldsNumber')
nAOY = tixi_h.getVectorSize(XpathAeroPerformanceMap+'/angleOfYaw')
nAOA = tixi_h.getVectorSize(XpathAeroPerformanceMap+'/angleOfAttack')
nCases = nMN*nRN*nAOY*nAOA
Mach_all = tixi_h.getFloatVector(XpathAeroPerformanceMap+'/machNumber', nMN)
Mach_all = numpy.array(Mach_all)
Re_all = tixi_h.getFloatVector(XpathAeroPerformanceMap+'/reynoldsNumber', nRN)
Re_all = numpy.array(Re_all)
Cd_all = tixi_h.getArray(XpathAeroPerformanceMap, 'cdT', nCases)
Cl_all = tixi_h.getArray(XpathAeroPerformanceMap, 'clT', nCases)
Cfx_all = tixi_h.getArray(XpathAeroPerformanceMap, 'cfx', nCases)
Cfy_all = tixi_h.getArray(XpathAeroPerformanceMap, 'cfy', nCases)
Cfz_all = tixi_h.getArray(XpathAeroPerformanceMap, 'cfz', nCases)
alpha_all = tixi_h.getFloatVector(XpathAeroPerformanceMap+'/angleOfAttack', nAOA)
alpha_all = numpy.array(alpha_all)
'''
This file contains the functions for the grouping/clustering algorithm from Watkins & Yang, J. Phys. Chem. B, vol 109, no 1, 2005
that do the actual work. This includes
(1) initial hierarchical clustering - InitialClustering
(2) with that clustering as input, Bayesian Inf. Crit. based clustering into m-levels
'''
# import approxpoissondpf
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt
from scipy.stats import poisson
import pandas as pd
def Grouping(segmentlengths, segmentcounts, mingroups, maxgroups):
'''
input
segmentlengths : the lengths of the CPA segments
segmentcounts : the nr of counts in each CPA segment
mingroups : the minimum nr of groups to test for
maxgroups : the maximum nr of groups to test for
output
groupingdata : the nr of states and their likelihoods with 2 different methods from Watkins & Young
mlikely_tot : most likely trajectory though the states
psummarry : the likelihood of drawing a certain state, given m states. This is in TIME occupancy, not NUMBER occupancy (see definition of pm)
'''
initialcluster = InitialClustering(segmentlengths, segmentcounts, plot=False)
Schw = np.zeros([maxgroups, 2])
psummarry = np.zeros([maxgroups, maxgroups])
mlst = np.arange(mingroups,maxgroups+1) # nr of levels under test
mlikely_tot = np.zeros((maxgroups, len(segmentlengths)), dtype=int)
for mgroups in mlst:
(pm, Im, Tm, mlikely, schwartz, pmj) = ExpectationMaximizationClustering(segmentlengths, segmentcounts, mgroups, initialcluster[mgroups-1])
Schw[mgroups-mingroups] = schwartz
mlikely_tot[mgroups-1] = mlikely
psummarry[mgroups-1, 0:mgroups] = pm
groupingdata = {'mlst':mlst, 'Schw0':Schw[:,0], 'Schw1':Schw[:,1]}
groupingdata = pd.DataFrame(groupingdata)
cols =['mlst', 'Schw0', 'Schw1']
groupingdata = groupingdata[cols]
return groupingdata, mlikely_tot, psummarry
def ApproxPoissPDF(k, mu):
logpdf=k*np.log(mu)-mu-(k*np.log(k)-k+0.5*np.log(2*np.pi*k))
idx = np.where(k==0)
for i in idx:
logpdf[i] = np.log(poisson.pmf(0,mu[i]))
pdf = np.exp(logpdf)
return (pdf, logpdf)
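# Added sanity check (illustrative only, values are arbitrary): for moderate counts the
# Stirling-based ApproxPoissPDF should closely track scipy's exact Poisson log-pmf.
# Guarded so it only runs when this file is executed directly.
if __name__ == "__main__":
    _k = np.arange(10, 60, dtype=float)
    _mu = np.full_like(_k, 20.0)
    _, _approx_logpdf = ApproxPoissPDF(_k, _mu)
    _exact_logpdf = poisson.logpmf(_k, _mu)
    assert np.allclose(_approx_logpdf, _exact_logpdf, atol=1e-2)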
def ClusterOnce(TG, NG, assignment):
'''
This function finds the two most similar segments in an array and clusters them into one state
TG: jumplengths
NG: rNlevels
'''
# note that my meshgrids start at zero. Need to add 1 to get other indexing
[Tm, Tj] = np.meshgrid(TG, TG)
[Nm, Nj] = np.meshgrid(NG, NG)
[m, j] = np.meshgrid(np.arange(len(NG)), np.arange(len(NG)))
m = m+1
j = j+1
#should this be Mmj or Mjm?
Mmj = (Nm+Nj)*np.log((Nm+Nj)/(Tm+Tj)) - (Nm)*np.log((Nm/Tm))-(Nj)*np.log((Nj/Tj)); # Eq 11
idx = np.where(np.ndarray.flatten(np.triu(m,1),'F')>0)[0] # not the same as Femius' idx, but Python indexes start at 0
winner_idx = np.argmax( | np.ndarray.flatten(Mmj) | numpy.ndarray.flatten |
import torch.utils.data as data
from PIL import Image
import os
import os.path
import torch
import numpy as np
import torchvision.transforms as transforms
from libs.transformations import euler_matrix
import argparse
import time
import random
import numpy.ma as ma
import copy
import math
import scipy.misc
import scipy.io as scio
import cv2
import _pickle as cPickle
from skimage.transform import resize
import matplotlib.pyplot as plt
class Dataset(data.Dataset):
def __init__(self, mode, root, add_noise, num_pt, num_cates, count, cate_id, w_size, occlude=False):
        # num_cates is the total number of categories to be preloaded from the dataset; cate_id is the category to be trained.
self.root = root
self.add_noise = add_noise
self.mode = mode
self.num_pt = num_pt
self.occlude = occlude
self.num_cates = num_cates
self.back_root = '{0}/train2017/'.format(self.root)
self.w_size = w_size + 1
self.cate_id = cate_id
# Path list: obj_list[], real_obj_list[], back_list[],
self.obj_list = {}
self.obj_name_list = {}
self.cate_set = [1, 2, 3, 4, 5, 6]
self.real_oc_list = {}
self.real_oc_name_set = {}
for ca in self.cate_set:
if ca != self.cate_id:
real_oc_name_list = os.listdir('{0}/data_list/real_{1}/{2}/'.format(self.root, self.mode, str(ca)))
self.real_oc_name_set[ca] = real_oc_name_list
del self.cate_set[self.cate_id - 1]
# Get all the occlusions.
# print(self.real_oc_name_set)
for key in self.real_oc_name_set.keys():
for item in self.real_oc_name_set[key]:
self.real_oc_list[item] = []
input_file = open(
'{0}/data_list/real_{1}/{2}/{3}/list.txt'.format(self.root, self.mode, str(key), item), 'r')
while 1:
input_line = input_file.readline()
if not input_line:
break
if input_line[-1:] == '\n':
input_line = input_line[:-1]
self.real_oc_list[item].append('{0}/data/{1}'.format(self.root, input_line))
input_file.close()
if self.mode == 'train':
for tmp_cate_id in range(1, self.num_cates + 1):
                # (n x m) obj_name_list[] contains the names of the parent dirs (e.g. 1a9e1fb2a51ffd065b07a27512172330) that hold the training list txt files (e.g. train/16069/0008)
listdir = os.listdir('{0}/data_list/train/{1}/'.format(self.root, tmp_cate_id))
self.obj_name_list[tmp_cate_id] = []
for i in listdir:
if os.path.isdir('{0}/data_list/train/{1}/{2}'.format(self.root, tmp_cate_id, i)):
self.obj_name_list[tmp_cate_id].append(i)
# self.obj_name_list[tmp_cate_id] = os.listdir('{0}/data_list/train/{1}/'.format(self.root, tmp_cate_id))
self.obj_list[tmp_cate_id] = {}
for item in self.obj_name_list[tmp_cate_id]:
# print(tmp_cate_id, item)# item: 1a9e1fb2a51ffd065b07a27512172330
self.obj_list[tmp_cate_id][item] = []
input_file = open('{0}/data_list/train/{1}/{2}/list.txt'.format(self.root, tmp_cate_id, item), 'r')
while 1:
input_line = input_file.readline() # read list.txt(train/16069/0008)
if not input_line:
break
if input_line[-1:] == '\n':
input_line = input_line[:-1]
                        # (n x m x k) obj_list holds the actual training samples under {root}/data/train/16069/0008; 0008 is just a prefix, and the suffixes (_color.png, _mask.png, _depth.png, _meta.txt, _coord.png) distinguish the different files in the 16069 dir.
self.obj_list[tmp_cate_id][item].append('{0}/data/{1}'.format(self.root, input_line))
input_file.close()
self.real_obj_list = {}
self.real_obj_name_list = {}
for tmp_cate_id in range(1, self.num_cates + 1):
# real_obj_name_list contains the real obj names from {}/data_list/real_train/1/ like bottle_blue_google_norm, bottle_starbuck_norm
self.real_obj_name_list[tmp_cate_id] = []
listdir = os.listdir('{0}/data_list/real_{1}/{2}/'.format(self.root, self.mode, tmp_cate_id))
for i in listdir:
if os.path.isdir('{0}/data_list/real_{1}/{2}/{3}'.format(self.root, self.mode, tmp_cate_id, i)):
self.real_obj_name_list[tmp_cate_id].append(i)
# self.real_obj_name_list[tmp_cate_id] = os.listdir('{0}/data_list/real_{1}/{2}/'.format(self.root, self.mode, tmp_cate_id))
self.real_obj_list[tmp_cate_id] = {}
for item in self.real_obj_name_list[tmp_cate_id]:
# print(tmp_cate_id, item) #item : bottle_blue_google_norm
self.real_obj_list[tmp_cate_id][item] = []
# real_train/scene_2/0000
input_file = open(
'{0}/data_list/real_{1}/{2}/{3}/list.txt'.format(self.root, self.mode, tmp_cate_id, item), 'r')
while 1:
input_line = input_file.readline()
if not input_line:
break
if input_line[-1:] == '\n':
input_line = input_line[:-1]
                    # real_obj_list contains the file prefixes under the dir {}/data/real_train/scene_2/, which are all consecutive frames of a video sequence.
self.real_obj_list[tmp_cate_id][item].append('{0}/data/{1}'.format(self.root, input_line))
input_file.close()
self.back_list = []
input_file = open('dataset/train2017.txt', 'r')
while 1:
input_line = input_file.readline()
if not input_line:
break
if input_line[-1:] == '\n':
input_line = input_line[:-1]
            # back_list holds the paths of the COCO train2017 images that are used during training.
self.back_list.append(self.back_root + input_line) # back_root is the dir of COCO dataset train2017
input_file.close()
self.mesh = []
input_file = open('dataset/sphere.xyz', 'r')
while 1:
input_line = input_file.readline()
if not input_line:
break
if input_line[-1:] == '\n':
input_line = input_line[:-1]
input_line = input_line.split(' ')
self.mesh.append([float(input_line[0]), float(input_line[1]), float(input_line[2])])
input_file.close()
self.mesh = np.array(self.mesh) * 0.6
self.cam_cx_1 = 322.52500
self.cam_cy_1 = 244.11084
self.cam_fx_1 = 591.01250
self.cam_fy_1 = 590.16775
self.cam_cx_2 = 319.5
self.cam_cy_2 = 239.5
self.cam_fx_2 = 577.5
self.cam_fy_2 = 577.5
self.xmap = np.array([[j for i in range(640)] for j in range(480)])
self.ymap = np.array([[i for i in range(640)] for j in range(480)])
self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.trancolor = transforms.ColorJitter(0.8, 0.5, 0.5, 0.05)
self.length = count
def get_occlusion(self, oc_obj, oc_frame, syn_or_real):
if syn_or_real:
cam_cx = self.cam_cx_1
cam_cy = self.cam_cy_1
cam_fx = self.cam_fx_1
cam_fy = self.cam_fy_1
else:
cam_cx = self.cam_cx_2
cam_cy = self.cam_cy_2
cam_fx = self.cam_fx_2
cam_fy = self.cam_fy_2
cam_scale = 1.0
oc_target = []
oc_input_file = open('{0}/model_scales/{1}.txt'.format(self.root, oc_obj), 'r')
for i in range(8):
oc_input_line = oc_input_file.readline()
if oc_input_line[-1:] == '\n':
oc_input_line = oc_input_line[:-1]
oc_input_line = oc_input_line.split(' ')
oc_target.append([float(oc_input_line[0]), float(oc_input_line[1]), float(oc_input_line[2])])
oc_input_file.close()
oc_target = np.array(oc_target)
r, t, _ = self.get_pose(oc_frame, oc_obj)
oc_target_tmp = np.dot(oc_target, r.T) + t
oc_target_tmp[:, 0] *= -1.0
oc_target_tmp[:, 1] *= -1.0
oc_rmin, oc_rmax, oc_cmin, oc_cmax = get_2dbbox(oc_target_tmp, cam_cx, cam_cy, cam_fx, cam_fy, cam_scale)
oc_img = Image.open('{0}_color.png'.format(oc_frame))
oc_depth = np.array(self.load_depth('{0}_depth.png'.format(oc_frame)))
oc_mask = (cv2.imread('{0}_mask.png'.format(oc_frame))[:, :, 0] == 255) # White is True and Black is False
        oc_img = np.array(oc_img)[:, :, :3]  # (480, 640, 3)
        oc_img = np.transpose(oc_img, (2, 0, 1))  # (3, 480, 640)
oc_img = oc_img / 255.0
oc_img = oc_img * (~oc_mask)
oc_depth = oc_depth * (~oc_mask)
oc_img = oc_img[:, oc_rmin:oc_rmax, oc_cmin:oc_cmax]
oc_depth = oc_depth[oc_rmin:oc_rmax, oc_cmin:oc_cmax]
oc_mask = oc_mask[oc_rmin:oc_rmax, oc_cmin:oc_cmax]
return oc_img, oc_depth, oc_mask
def divide_scale(self, scale, pts):
pts[:, 0] = pts[:, 0] / scale[0]
pts[:, 1] = pts[:, 1] / scale[1]
pts[:, 2] = pts[:, 2] / scale[2]
return pts
def get_anchor_box(self, ori_bbox):
bbox = ori_bbox
limit = np.array(search_fit(bbox))
num_per_axis = 5
gap_max = num_per_axis - 1
small_range = [1, 3]
gap_x = (limit[1] - limit[0]) / float(gap_max)
gap_y = (limit[3] - limit[2]) / float(gap_max)
gap_z = (limit[5] - limit[4]) / float(gap_max)
ans = []
scale = [max(limit[1], -limit[0]), max(limit[3], -limit[2]), max(limit[5], -limit[4])]
for i in range(0, num_per_axis):
for j in range(0, num_per_axis):
for k in range(0, num_per_axis):
ans.append([limit[0] + i * gap_x, limit[2] + j * gap_y, limit[4] + k * gap_z])
ans = np.array(ans)
scale = np.array(scale)
ans = self.divide_scale(scale, ans)
return ans, scale
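    # Note (added for clarity): get_anchor_box tiles the axis-aligned bounding box of the
    # prior points with a 5 x 5 x 5 grid of anchors and also returns the per-axis
    # normalization factors `scale` derived from the box limits; the same `scale` is
    # reused in change_to_scale below so anchors and point clouds share one normalized frame.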
def change_to_scale(self, scale, cloud_fr, cloud_to):
cloud_fr = self.divide_scale(scale, cloud_fr)
cloud_to = self.divide_scale(scale, cloud_to)
return cloud_fr, cloud_to
def enlarge_bbox(self, target):
limit = np.array(search_fit(target))
longest = max(limit[1] - limit[0], limit[3] - limit[2], limit[5] - limit[4])
longest = longest * 1.3
scale1 = longest / (limit[1] - limit[0])
scale2 = longest / (limit[3] - limit[2])
scale3 = longest / (limit[5] - limit[4])
target[:, 0] *= scale1
target[:, 1] *= scale2
target[:, 2] *= scale3
return target
def load_depth(self, depth_path):
depth = cv2.imread(depth_path, -1)
if len(depth.shape) == 3:
depth16 = | np.uint16(depth[:, :, 1] * 256) | numpy.uint16 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
import math
def countMx(data):
N = np.size(data)
Mx = np.sum(data)/N
return Mx;
def countD(data):
Mx = countMx(data)
N = np.size(data)
D = 0
for u in data:
D += (u - Mx) ** 2
D = D / N
return D;
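# Added self-check (kept behind a __main__ guard so it only runs when this script is
# executed directly): countMx and countD are the sample mean and the population
# variance (ddof=0), so they should agree with numpy.
if __name__ == "__main__":
    _sample = np.array([1.0, 2.0, 4.0, 8.0])
    assert np.isclose(countMx(_sample), np.mean(_sample))
    assert np.isclose(countD(_sample), np.var(_sample))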
# ORDINARY CONTINUOUS UNIFORM DISTRIBUTION, DIRECTLY FROM THE FORMULA
def RNUNIF(b, a):
return (b - a) * | np.random.random() | numpy.random.random |
#!/usr/bin/env python
import math
import numpy as np
import rospy
import tf
from nav_msgs.msg import OccupancyGrid
from geometry_msgs.msg import Quaternion
from std_msgs.msg import Header
from mapping import MappingBase
from icp import toList,toArray
class Mapping(MappingBase):
def __init__(self):
super(Mapping,self).__init__()
# ray tracing update factor
self.weight=0.02
def update(self, laserPC, center):
"""
        Update the occupancy grid along each laser ray with a simple additive update rule.
"""
# change the points into integers
start=list(np.round(np.array(center)/self.resolution).astype(int))
for point in laserPC.T:
end=list(np.round(point//self.resolution).astype(int))
pointList=self.line(start,end)
if np.size(pointList)==0:
return
# remove obstacle point.
if list(pointList[0])==end:
np.delete(pointList,0,axis=0)
elif list(pointList[-1])==end:
np.delete(pointList,-1,axis=0)
# origin bias
pointList+=self.origin
pointList=self.inBound(pointList)
            # ray passed through these cells: decrease the occupancy probability
self.pmap[pointList[:,0],pointList[:,1]]-=self.weight
# reflected on obstacles.
endPoint=self.inBound( | np.array(end) | numpy.array |
import math, torch
import numpy as np
from numpy.random import normal as normrnd
from scipy.stats import multivariate_normal, norm
from scipy.linalg import sqrtm, expm
from pdb import set_trace as bp
from include.DNN import DNN
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from include.dataStructures.particle import Particle
class localize:
def __init__(self, numP, su, sz, distMap, mat, wayPts, R, dim, useClas, hardClas, modelpath="./models/best.pth"):
self.np = numP
self.sz = sz
self.dists = distMap
self.dim = dim
self.wayPts = wayPts
self.pts = self.convert(wayPts)
self.nAP = mat.numAPs
self.tx = mat.Tx
self.R = R
self.start = self.wayPts[0]
self.su = su
self.path = []
self.APLocs = []
self.IDs = []
self.use = useClas
self.hard = hardClas
self.modelpath = modelpath
self.model = None
self.confidence = [0, 0, 0, 0] # true positive, false positive, true negative, false negative
if self.dim == 2: self.su = su[0:2]
if self.use: self.load_model()
def print(self, samples):
for i in range(self.np):
print("pose: ", samples[i].pose, " | weight: ", samples[i].w)
def distance(self, x, y):
if len(x)==3 and len(y)==3:
return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 + (x[2]-y[2])**2 )
else:
return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 )
def MSE(self):
mse = 0
for i in range(len(self.pts)):
mse += self.distance(self.wayPts[i], self.path[i])
mse = mse/len(self.pts)
return mse
def getCDF(self):
cdf = [0 for x in range(len(self.pts))]
for i in range(len(self.pts)):
cdf[i] = self.distance(self.wayPts[i], self.path[i])
return cdf
def distrib(self):
start = self.wayPts[0] ; samples = []
if self.dim == 2: start = [start[0], start[1]]
if self.dim == 3: start = start
for _ in range(self.np):
samples.append(Particle(start, 1/self.np))
return samples
def convert(self, pts):
n = len(pts)
rtPts = []
for i in range(1, n):
dx = pts[i][0] - pts[i-1][0]
dy = pts[i][1] - pts[i-1][1]
if self.dim==2: rtPts.append([dx, dy])
if self.dim==3: dz = pts[i][2] - pts[i-1][2] ; rtPts.append([dx, dy, dz])
return rtPts
'''
load pytorch model and save dict
'''
def load_model(self):
model = DNN()
path = self.modelpath
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['state_dict'])
self.model = model
self.model.eval()
'''
classify into LOS/NLOS
'''
def classify(self, rssi, euc):
inp = torch.tensor([rssi, euc])
out = self.model(inp.float())
pred = 1 if (out[1]>out[0]) else 0
return pred
'''
weighting using the normpdf subroutine
'''
def getWeight(self, dz):
norpdf = 1
for i in range(len(dz)):
if dz[i]!=0:
norpdf *= norm.pdf(dz[i], 0, self.sz[i])
return norpdf
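    # Note (added): the weight above is the product of independent 1-D Gaussian
    # likelihoods norm.pdf(dz_i; 0, sz_i), taken only over entries whose innovation
    # dz_i is non-zero.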
'''
weighting using the mvnpdf subroutine
'''
def getMultiWeight(self, dz):
idx = [i for i, e in enumerate(dz) if e != 0]
val = [] ; sig = []
if len(idx)==0:
return 1/self.np
for i in idx:
val.append(dz[i])
sig.append(self.sz[i])
mvn = multivariate_normal([0]*len(idx), | np.diag(sig) | numpy.diag |
# -*- coding: utf-8 -*-
import argparse
import json
from os import listdir
from os.path import join
import numpy as np
import pandas as pd
from src.utilities import mkdir_if_needed
def read_presentation_type(sequence):
"""
This function extracts the presentation_type variable from a sequence dictionary.
"""
if sequence["alternatives"][0] in [0, 1]:
return "alternatives"
elif sequence["attributes"][0] in ["p", "m"]:
return "attributes"
def compute_durations(sequence, alternative=None, attribute=None):
"""Computes the relative presentation duration of alternatives, attributes, or combinations of both for a given sequence.
Args:
sequence (dict): Sequence dictionary with keys "attributes", "alternatives" and "durations", each containing a list.
alternative (int, optional): Index of alternative for which overall relative duration should be computed. Defaults to None.
attribute (str, optional): Attribute for which overall relative duration should be computed. For example "p" or "m". Defaults to None.
Returns:
float: Relative duration measure.
"""
if alternative is not None:
alt_mask = np.array(
[alt in [alternative, "all"] for alt in sequence["alternatives"]]
)
else:
alt_mask = np.ones(len(sequence["alternatives"])).astype(bool)
if attribute is not None:
att_mask = np.array(
[att in [attribute, "all"] for att in sequence["attributes"]]
)
else:
att_mask = np.ones(len(sequence["attributes"])).astype(bool)
g = np.sum(np.array(sequence["durations"])[alt_mask & att_mask]) / np.sum(
np.array(sequence["durations"])
)
return g
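# Added toy example (hypothetical sequence, not taken from the experiment data); guarded
# so it only runs when this module is executed directly and its imports resolve:
# with durations [2, 1, 1] and alternatives [0, 1, 0], alternative 0 is on screen for
# (2 + 1) / 4 = 0.75 of the trial.
if __name__ == "__main__":
    _toy_sequence = {
        "alternatives": [0, 1, 0],
        "attributes": ["all", "all", "all"],
        "durations": [2, 1, 1],
    }
    assert np.isclose(compute_durations(_toy_sequence, alternative=0), 0.75)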
def add_duration_vars(df):
"""Adds variables for relative durations towards alernatives and attributes.
Args:
df (pandas.DataFrame): Dataframe with `sequence` variable containing the presentation sequence.
Returns:
pandas.DataFrame: The DataFrame with added variables.
"""
for alt in [0, 1]:
df[f"g{alt}r"] = df.apply(
lambda x: compute_durations(json.loads(x["sequence"]), alternative=alt),
axis=1,
)
for att in ["p", "m"]:
df[f"g{att}r"] = df.apply(
lambda x: compute_durations(json.loads(x["sequence"]), attribute=att),
axis=1,
)
# Normalize durations to 1 in each trial
df["g0"] = df["g0r"] / df[["g0r", "g1r"]].sum(axis=1)
df["g1"] = df["g1r"] / df[["g0r", "g1r"]].sum(axis=1)
df["gm"] = df["gmr"] / df[["gmr", "gpr"]].sum(axis=1)
df["gp"] = df["gpr"] / df[["gmr", "gpr"]].sum(axis=1)
return df.drop(["g0r", "g1r", "gmr", "gpr"], axis=1)
def add_last_stage_favours_var(df):
"""Adds variable that describes which alternative is favoured by the last presentation step in the sequence.
Args:
df (pandas.DataFrame): DataFrame with conditions. Must contain columns `presentation`, `targetFirst`, `target`, `other`, `p0`, `p1`, `m0`, `m1`.
Returns:
pandas.DataFrame: The DataFrame with added `lastFavours` column.
"""
df["last_stage_favours"] = np.where(
df["presentation"] == "alternatives",
df["sequence"].apply(lambda x: json.loads(x)["alternatives"][-1]),
np.where(
df["presentation"] == "attributes",
np.where(
df["sequence"].apply(lambda x: json.loads(x)["attributes"][-1] == "p"),
df["higher_p"],
df["higher_m"],
),
np.nan,
),
).astype(float)
return df
def add_duration_favours_var(choices):
# Add target variable, coding which alternative is favoured by hypothesized duration effect
choices["duration_favours"] = np.where(
choices["condition"].str.startswith("exp_"),
np.where(
choices["presentation"] == "alternatives",
choices[["g0", "g1"]].idxmax(axis=1).str[1],
np.where(
choices[["gp", "gm"]].idxmax(axis=1).str[1] == "p",
choices["higher_p"],
choices["higher_m"],
),
),
np.nan,
).astype(float)
return choices
def add_misc_variables(choices):
# Add necessary variables
choices["label0"] = np.where(
choices["condition"].str.startswith("catch"),
"dominated",
np.where(choices["higher_p"] == 0, "higher_p", "higher_m"),
)
choices["label1"] = np.where(
choices["condition"].str.startswith("catch"),
"dominant",
np.where(choices["higher_p"] == 1, "higher_p", "higher_m"),
)
choices["duration_favours_str"] = np.where(
choices["duration_favours"] == 0,
choices["label0"],
np.where(choices["duration_favours"] == 1, choices["label1"], np.nan),
)
choices["last_stage_favours_str"] = np.where(
choices["last_stage_favours"] == 0,
choices["label0"],
np.where(choices["last_stage_favours"] == 1, choices["label1"], np.nan),
)
choices["ev0"] = choices["p0"] * choices["m0"]
choices["ev1"] = choices["p1"] * choices["m1"]
choices["delta_ev"] = choices["ev0"] - choices["ev1"]
choices["delta_ev_z"] = (
choices["delta_ev"] - choices["delta_ev"].mean()
) / choices["delta_ev"].std(ddof=1)
choices["choose_higher_p"] = choices["choice"] == choices["higher_p"]
choices["by_attribute"] = choices["presentation"] == "attributes"
choices["left_alternative"] = np.where(
choices["pL"] == choices["p0"],
0,
np.where(choices["pL"] == choices["p1"], 1, np.nan),
)
return choices
def preprocess_choice_data(raw_data):
"""
This function extracts and processes choice data from raw single subject jsPsych data.
"""
# Extract only choice data
choices = (
raw_data.loc[
(raw_data["trial_type"] == "two-gamble-sequence")
& ~(raw_data["condition.1"].str.startswith("practice_"))
][
[
"condition.1",
"rt",
"key_press",
"choice",
"p0",
"p1",
"m0",
"m1",
"pL",
"sequence",
"webgazer_data",
]
]
.rename({"condition.1": "condition"}, axis=1)
.reset_index(drop=True)
.astype({"p0": float, "p1": float, "m0": float, "m1": float, "pL": float})
)
# Adjust outcome values
choices[["m0", "m1"]] *= 10
# Handle missing responses, recode choice to integer
for var in ["choice", "rt"]:
choices[var] = | np.where(choices[var] == '"', np.nan, choices[var]) | numpy.where |
import pandas as pd
import numpy as np
import pvlib
import trimesh
import meshio
from ..client.client import Client
from copy import copy
import pickle
from pathlib import Path
import os
def generate_irradiation_vector(time, north_angle=0):
data, metadata = pvlib.iotools.read_epw(r'resources/AUT_Vienna.Schwechat.110360_IWEC.epw')
location = pvlib.location.Location.from_epw(metadata)
solar_position = location.get_solarposition(time)
phi = np.deg2rad(- (solar_position.azimuth.values + north_angle))
theta = np.deg2rad(solar_position.elevation.values)
cos_theta = np.cos(theta)
irradiation_vector = np.zeros([time.shape[0], 3], dtype=np.float32)
irradiation_vector[:, 0] = - cos_theta * np.cos(phi)
irradiation_vector[:, 1] = - cos_theta * np.sin(phi)
irradiation_vector[:, 2] = - np.sin(theta)
df = pd.DataFrame(index=time,
columns=['irradiation_vector'])
df['irradiation_vector'] = [x for x in irradiation_vector]
return df
def create_sun_window(mesh, irradiation_vector):
n = irradiation_vector.shape[0]
u, v = two_orthogonal_vectors(irradiation_vector)
sun_cs = np.empty((n, 3, 3))
sun_cs[:, 0, :] = u
sun_cs[:, 1, :] = v
sun_cs[:, 2, :] = irradiation_vector
rectangles = np.empty((n, 4, 3))
oriented_mesh_corners = trimesh.bounds.corners(mesh.bounding_box_oriented.bounds).T
for i in range(n):
rot = sun_cs[i, :, :]
rectangle = calc_local_rect(rot, oriented_mesh_corners)
rectangles[i, :, :] = np.linalg.inv(rot).dot(rectangle.T).T
return rectangles
def two_orthogonal_vectors(vector):
if isinstance(vector, pd.DataFrame):
vector = vector.values
if vector.shape.__len__() == 1:
vector = np.array([vector])
x, y = generate_basis_vectorized(vector)
return x, y
def generate_basis_vectorized(z):
"""
from: trimesh.util.generate_basis
Generate an arbitrary basis (also known as a coordinate frame)
from a given z-axis vector.
Parameters
------------
z : (3,) float
A vector along the positive z-axis.
epsilon : float
Numbers smaller than this considered zero.
Returns
---------
x : (3,) float
Vector along x axis.
y : (3,) float
Vector along y axis.
z : (3,) float
Vector along z axis.
"""
epsilon = 1e-12
# X as arbitrary perpendicular vector
x = np.zeros((z.shape[0], 3))
x[:, 0] = -z[:, 1]
x[:, 1] = z[:, 0]
# avoid degenerate case
x_norm = trimesh.util.row_norm(x)
ind1 = x_norm < epsilon
x[ind1, 0] = -z[ind1, 2]
x[ind1, 1] = z[ind1, 1]
x[ind1, 2] = z[ind1, 0]
x[ind1, :] /= trimesh.util.row_norm(x[ind1, :])[:, None]
x[np.logical_not(ind1), :] /= trimesh.util.row_norm(x[np.logical_not(ind1), :])[:, None]
# get perpendicular Y with cross product
y = np.cross(z, x)
return x, y
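# Added check (illustrative; this module is normally imported as part of a package
# because of its relative imports, so the guard below rarely fires): the generated
# basis vectors should be orthogonal to the input z vectors and to each other.
if __name__ == "__main__":
    _z = np.random.rand(5, 3) - 0.5
    _z /= np.linalg.norm(_z, axis=1, keepdims=True)
    _x, _y = two_orthogonal_vectors(_z)
    assert np.allclose(np.einsum("ij,ij->i", _x, _z), 0.0, atol=1e-8)
    assert np.allclose(np.einsum("ij,ij->i", _x, _y), 0.0, atol=1e-8)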
def calc_local_rect(rot_mat, oriented_mesh_corners):
import cv2
local_oriented_corners = rot_mat.dot(oriented_mesh_corners).T
z_translation = -abs(min(local_oriented_corners[:, 2]) * 1.1)
rec = | np.zeros((4, 3)) | numpy.zeros |
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the xgraph partitioning functionality"""
import os
import unittest
import numpy as np
# ! Important for device registration
import pyxir as px
from pyxir.graph.layer.xlayer import XLayer, ConvData, BatchData
from pyxir.graph.partitioning.xgraph_partitioner import XGraphPartitioner
from pyxir.graph.xgraph_factory import XGraphFactory
from pyxir.target_registry import TargetRegistry, register_op_support_check
import logging
logging.basicConfig()
logger = logging.getLogger("pyxir")
# logger.setLevel(logging.DEBUG)
class TestXGraphPartitioner(unittest.TestCase):
xgraph_partitioner = XGraphPartitioner()
xgraph_factory = XGraphFactory()
@classmethod
def setUpClass(cls):
def xgraph_build_func(xgraph):
raise NotImplementedError("")
def xgraph_optimizer(xgraph):
raise NotImplementedError("")
def xgraph_quantizer(xgraph):
raise NotImplementedError("")
def xgraph_compiler(xgraph):
raise NotImplementedError("")
target_registry = TargetRegistry()
target_registry.register_target(
"test",
xgraph_optimizer,
xgraph_quantizer,
xgraph_compiler,
xgraph_build_func,
)
@register_op_support_check("test", "Convolution")
def conv_op_support(X, bXs, tXs):
return True
@register_op_support_check("test", "Pooling")
def pooling_op_support(X, bXs, tXs):
return True
@register_op_support_check("test", "Concat")
def concat_op_support(X, bXs, tXs):
return False
@register_op_support_check("test", "Eltwise")
def eltwise_op_support(X, bXs, tXs):
return True
@register_op_support_check("test", "ReLU")
def relu_op_support(X, bXs, tXs):
return True
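        # Note (added): Concat deliberately reports no support above, so the partitioner
        # has to break subgraphs at every Concat layer; the tests below rely on this to
        # exercise partition boundaries.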
@classmethod
def tearDownClass(cls):
target_registry = TargetRegistry()
target_registry.unregister_target("test")
def test_basic(self):
x1 = px.ops.input("in1", shape=[1, 1, 4, 4])
x2 = px.ops.input("in2", shape=[1, 2, 2, 2])
w1 = px.ops.constant("weight", np.ones((2, 1, 2, 2), dtype=np.float32))
conv = px.ops.conv2d(
op_name="conv1",
input_layer=x1,
weights_layer=w1,
kernel_size=[2, 2],
strides=[1, 1],
padding_hw=[0, 0, 0, 0],
dilation=[1, 1],
groups=1,
channels=2,
data_layout="NCHW",
kernel_layout="OIHW",
)
pool = px.ops.pool2d(
op_name="pool1", input_layer=conv, pool_type="Avg", pool_size=[2, 2],
)
add = px.ops.eltwise("add1", pool, x2)
net = [x1, x2, conv, pool, add]
xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
p_xgraph = px.partition(xgraph, ["test"])
assert len(p_xgraph.get_layer_names()) == 5
assert p_xgraph.get_subgraph_names() == ["xp0"]
p_xlayers = p_xgraph.get_layers()
assert p_xlayers[0].type[0] in ["Input"]
assert p_xlayers[1].type[0] in ["Convolution"]
assert p_xlayers[2].type[0] in ["Pooling"]
assert p_xlayers[3].type[0] in ["Input"]
assert p_xlayers[4].type[0] in ["Eltwise"]
assert p_xlayers[0].target == "cpu"
assert p_xlayers[1].target == "test"
assert p_xlayers[2].target == "test"
assert p_xlayers[3].target == "cpu"
assert p_xlayers[4].target == "test"
assert p_xlayers[0].subgraph is None
assert p_xlayers[1].subgraph == "xp0"
assert p_xlayers[2].subgraph == "xp0"
assert p_xlayers[3].subgraph is None
assert p_xlayers[4].subgraph == "xp0"
subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(p_xgraph)
assert len(subgraphs) == 1
xp0 = subgraphs[0]
assert xp0.name == "xp0"
xp0_xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(
xp0.subgraph_data
)
assert xp0.bottoms == ["in1", "in2"]
assert xp0.tops == []
assert xp0.shapes == [[-1, 2, 2, 2]]
assert xp0.sizes == [8]
assert len(xp0_xgraph) == 5
xp0_layers = xp0_xgraph.get_layers()
assert xp0_layers[0].type[0] == "Input"
assert xp0_layers[0].layer[0] == "conv1"
assert xp0_layers[1].type[0] == "Convolution"
assert xp0_layers[2].type[0] == "Pooling"
assert xp0_layers[3].type[0] == "Input"
assert xp0_layers[4].type[0] == "Eltwise"
assert xp0_layers[0].bottoms == []
assert xp0_layers[0].tops == ["conv1"]
assert xp0_layers[1].bottoms == ["xinput0"]
assert xp0_layers[1].tops == ["pool1"]
assert xp0_layers[2].bottoms == ["conv1"]
assert xp0_layers[2].tops == ["add1"]
def test_interrupt_partition_in_add_branch(self):
        x = px.ops.input("in1", shape=[1, 28, 28, 2048])
w1 = px.ops.constant("weight", np.ones((2048, 2048, 1, 1), dtype=np.float32))
conv1 = px.ops.conv2d(
op_name="conv1",
input_layer=x,
weights_layer=w1,
kernel_size=[1, 1],
strides=[1, 1],
padding_hw=[0, 0, 0, 0],
dilation=[1, 1],
groups=1,
channels=2048,
data_layout="NHWC",
kernel_layout="OIHW",
)
r1 = px.ops.relu("r1", [conv1])
w2 = px.ops.constant("weight", np.ones((512, 2048, 1, 1), dtype=np.float32))
conv2 = px.ops.conv2d(
op_name="conv2",
input_layer=r1,
weights_layer=w2,
kernel_size=[1, 1],
strides=[1, 1],
padding_hw=[0, 0, 0, 0],
dilation=[1, 1],
groups=1,
channels=512,
data_layout="NHWC",
kernel_layout="OIHW",
)
sigm = px.ops.sigmoid("sigm", [conv2]) # Unsupported layer
w3 = px.ops.constant("weight", np.ones((2048, 512, 1, 1), dtype=np.float32))
conv3 = px.ops.conv2d(
op_name="conv3",
input_layer=sigm,
weights_layer=w3,
kernel_size=[1, 1],
strides=[1, 1],
padding_hw=[0, 0, 0, 0],
dilation=[1, 1],
groups=1,
channels=2048,
data_layout="NHWC",
kernel_layout="OIHW",
) # Although this layer is supported, it should not be in the partition
add = px.ops.eltwise(
"add", r1, conv3
) # Although this layer is supported, it should not be in the partition
net = [x, conv1, r1, conv2, sigm, conv3, add]
xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
p_xgraph = px.partition(xgraph, ["test"])
assert len(p_xgraph.get_layer_names()) == 7
assert p_xgraph.get_subgraph_names() == ["xp0"]
p_xlayers = p_xgraph.get_layers()
assert p_xgraph.get("in1").target == "cpu"
assert p_xgraph.get("conv1").target == "test"
assert p_xgraph.get("r1").target == "test"
assert p_xgraph.get("conv2").target == "test"
assert p_xgraph.get("sigm").target == "cpu"
assert p_xgraph.get("conv3").target == "cpu"
assert p_xgraph.get("add").target == "cpu"
subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(p_xgraph)
assert len(subgraphs) == 1
xp0 = subgraphs[0]
assert xp0.name == "xp0"
xp0_xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(
xp0.subgraph_data
)
assert xp0.bottoms == ["in1"]
assert xp0.tops == ["add", "sigm"]
assert xp0.shapes == [[-1, 28, 28, 2048], [-1, 28, 28, 512]]
assert xp0.sizes == [28 * 28 * 2048, 28 * 28 * 512]
assert len(xp0_xgraph) == 4
xp0_layers = xp0_xgraph.get_layers()
assert xp0_layers[0].type[0] == "Input"
assert xp0_layers[0].layer[0] == "conv1"
assert xp0_layers[1].type[0] == "Convolution"
assert xp0_layers[2].type[0] == "ReLU"
assert xp0_layers[3].type[0] == "Convolution"
def test_complete_partition(self):
x = px.ops.input("in1", shape=[1, 1, 4, 4])
w1 = px.ops.constant("weight", np.ones((2, 1, 2, 2), dtype=np.float32))
conv = px.ops.conv2d(
op_name="conv1", input_layer=x, weights_layer=w1, kernel_size=[2, 2],
)
pool = px.ops.pool2d(
op_name="pool1", input_layer=conv, pool_type="Avg", pool_size=[2, 2],
)
net = [x, conv, pool]
xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
p_xgraph = px.partition(xgraph, ["test"])
assert len(p_xgraph.get_layer_names()) == 3
assert p_xgraph.get_subgraph_names() == ["xp0"]
p_xlayers = p_xgraph.get_layers()
assert p_xlayers[0].type[0] in ["Input"]
assert p_xlayers[1].type[0] in ["Convolution"]
assert p_xlayers[2].type[0] in ["Pooling"]
assert p_xlayers[0].target == "cpu"
assert p_xlayers[1].target == "test"
assert p_xlayers[2].target == "test"
assert p_xlayers[0].subgraph is None
assert p_xlayers[1].subgraph == "xp0"
assert p_xlayers[2].subgraph == "xp0"
subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(p_xgraph)
assert len(subgraphs) == 1
xp0 = subgraphs[0]
assert xp0.name == "xp0"
xp0_xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(
xp0.subgraph_data
)
assert xp0.bottoms == ["in1"]
assert xp0.tops == []
assert xp0.shapes == [[-1, 2, 2, 2]]
assert xp0.sizes == [8]
assert xp0.attrs["target"] == "test"
assert xp0.attrs["__bottom_tensors"] == {"xinput0": ["in1"]}
assert xp0.attrs["orig_bottom_tensors"] == {"xinput0": ["in1"]}
assert xp0.attrs["__top_tensors"] == {"pool1": []}
assert xp0.attrs["orig_top_tensors"] == {"pool1": []}
assert len(xp0_xgraph) == 3
xp0_layers = xp0_xgraph.get_layers()
assert xp0_layers[0].type[0] == "Input"
assert xp0_layers[0].layer[0] == "conv1"
assert xp0_layers[1].type[0] == "Convolution"
assert xp0_layers[2].type[0] == "Pooling"
assert xp0_layers[0].bottoms == []
assert xp0_layers[0].tops == ["conv1"]
assert xp0_layers[1].bottoms == ["xinput0"]
assert xp0_layers[1].tops == ["pool1"]
assert xp0_layers[2].bottoms == ["conv1"]
assert xp0_layers[2].tops == []
def test_two_partitions_through_interruption(self):
        # A layer inside a residual-type branch is not supported
# Here: BatchNorm
x1 = px.ops.input("in1", shape=[1, 1, 4, 4])
w1 = px.ops.constant("weight", np.ones((2, 1, 2, 2), dtype=np.float32))
conv1 = px.ops.conv2d(
op_name="conv1", input_layer=x1, weights_layer=w1, kernel_size=[2, 2],
) # 1, 2, 3, 3
pool = px.ops.pool2d(
op_name="pool1",
input_layer=conv1,
pool_type="Avg",
pool_size=[2, 2],
padding=[1, 1, 0, 0],
) # 1, 2, 3, 3
bn_mean = px.ops.constant("mean", np.ones((2,), dtype=np.float32))
bn_var = px.ops.constant("var", np.ones((2,), dtype=np.float32))
bn_gamma = px.ops.constant("gamma", np.ones((2,), dtype=np.float32))
bn_beta = px.ops.constant("beta", np.ones((2,), dtype=np.float32))
bn = px.ops.batch_norm(
op_name="bn1",
input_layer=conv1,
mean_layer=bn_mean,
variance_layer=bn_var,
gamma_layer=bn_gamma,
beta_layer=bn_beta,
axis=1,
) # 1, 2, 3, 3
concat = px.ops.concat("concat1", [pool, bn], axis=1) # 1, 4, 3, 3
w2 = px.ops.constant("weight2", np.ones((6, 2, 2, 2), dtype=np.float32))
conv2 = px.ops.conv2d(
op_name="conv2",
input_layer=concat,
weights_layer=w2,
kernel_size=[2, 2],
padding_hw=[1, 1, 0, 0],
) # 1, 6, 3, 3
net = [x1, conv1, pool, bn, concat, conv2]
xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
p_xgraph = px.partition(xgraph, ["test"])
assert len(p_xgraph.get_layer_names()) == 6
assert p_xgraph.get_subgraph_names() == ["xp0"]
p_xlayers = p_xgraph.get_layers()
assert p_xlayers[0].type[0] in ["Input"]
assert p_xlayers[1].type[0] in ["Convolution"]
assert p_xlayers[2].type[0] in ["Pooling"]
assert p_xlayers[3].type[0] in ["BatchNorm"]
assert p_xlayers[4].type[0] in ["Concat"]
assert p_xlayers[5].type[0] in ["Convolution"]
assert p_xlayers[0].target == "cpu"
assert p_xlayers[1].target == "test"
assert p_xlayers[2].target == "test"
assert p_xlayers[3].target == "cpu"
assert p_xlayers[4].target == "cpu"
assert p_xlayers[5].target == "cpu"
assert p_xlayers[0].subgraph is None
assert p_xlayers[1].subgraph == "xp0"
assert p_xlayers[2].subgraph == "xp0"
assert p_xlayers[3].subgraph is None
assert p_xlayers[4].subgraph is None
assert p_xlayers[5].subgraph is None
assert p_xlayers[3].name == "bn1"
assert p_xlayers[3].bottoms == ["conv1"]
assert p_xlayers[3].tops == ["concat1"]
assert p_xlayers[4].name == "concat1"
assert p_xlayers[4].bottoms == ["pool1", "bn1"]
assert p_xlayers[4].tops == ["conv2"]
subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(p_xgraph)
assert len(subgraphs) == 1
xp0 = subgraphs[0]
assert xp0.name == "xp0"
xp0_xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(
xp0.subgraph_data
)
assert xp0.bottoms == ["in1"]
assert xp0.tops == ["bn1", "concat1"]
assert xp0.shapes == [[-1, 2, 3, 3], [-1, 2, 3, 3]]
assert xp0.sizes == [18, 18]
assert xp0.attrs["target"] == "test"
assert xp0.attrs["__bottom_tensors"] == {"xinput0": ["in1"]}
assert xp0.attrs["orig_bottom_tensors"] == {"xinput0": ["in1"]}
assert xp0.attrs["__top_tensors"] == {"conv1": ["bn1"], "pool1": ["concat1"]}
assert xp0.attrs["orig_top_tensors"] == {"conv1": ["bn1"], "pool1": ["concat1"]}
assert len(xp0_xgraph) == 3
xp0_layers = xp0_xgraph.get_layers()
assert [X.name for X in xp0_xgraph.get_input_layers()] == ["xinput0"]
# TODO: XGraph only recognizes output layers when they have no top
# layers
assert [X.name for X in xp0_xgraph.get_output_layers()] == ["pool1"]
assert xp0_layers[0].type[0] == "Input"
assert xp0_layers[0].layer[0] == "conv1"
assert xp0_layers[1].type[0] == "Convolution"
assert xp0_layers[2].type[0] == "Pooling"
assert xp0_layers[0].bottoms == []
assert xp0_layers[0].tops == ["conv1"]
assert xp0_layers[1].bottoms == ["xinput0"]
assert xp0_layers[1].tops == ["pool1"]
assert xp0_layers[2].bottoms == ["conv1"]
assert xp0_layers[2].tops == []
def test_multiple_partitions(self):
x1 = px.ops.input("in1", shape=[1, 1, 4, 4])
x2 = px.ops.input("in2", shape=[1, 2, 2, 2])
w1 = px.ops.constant("weight", np.ones((2, 1, 2, 2), dtype=np.float32))
conv1 = px.ops.conv2d(
op_name="conv1", input_layer=x1, weights_layer=w1, kernel_size=[2, 2],
) # 1, 2, 3, 3
pool = px.ops.pool2d(
op_name="pool1", input_layer=conv1, pool_type="Avg", pool_size=[2, 2],
) # 1, 2, 2, 2
add = px.ops.eltwise("add1", pool, x2) # 1, 2, 2, 2
bn_mean = px.ops.constant("mean", np.ones((2,), dtype=np.float32))
bn_var = px.ops.constant("var", | np.ones((2,), dtype=np.float32) | numpy.ones |
import tensorflow as tf
import numpy as np
import cv2
import imutils
import math
import os
import shutil
import random
from tensorflow.python.ops.gen_array_ops import fill
def _get_legs(label):
# @brief Extract legs from given binary label.
# @param label Binary image u8c1 where 0 - empty space and ~255 - leg.
# @return List of legs as list of pairs [y,x] where each pairs describes center coordinates of one leg.
label_squeezed = np.squeeze(label.copy())
cnts = cv2.findContours(
label_squeezed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
legs = []
for c in cnts:
M = cv2.moments(c)
# There are no legs in this label.
if M["m00"] == 0:
continue
# Compute the center of the contour.
x = int(M["m10"] / M["m00"])
y = int(M["m01"] / M["m00"])
coords = [y, x]
legs.append(coords)
return legs
def _get_distances(y, x, legs):
# @brief Get list of euclidean distances from given pixel [y,x] to each leg.
# @param y Y coordinate of pixel.
# @param x X coordinate of pixel.
# @return list of euclidean distances to each leg.
distances = []
for leg in legs:
leg_x = leg[1]
leg_y = leg[0]
d = math.sqrt(
math.pow(leg_x - x, 2) +
math.pow(leg_y - y, 2)
)
distances.append(d)
return distances
def _get_leg_weights_for_label(height, width, legs, w0, sigma):
# @brief Get matrix with weights computed based on euclidean distance from each pixel to closes leg.
    # This function is a modification of the original U-Net weighting, which is based on the
    # distance to the border between the two nearest cells.
# @param height Height of processed image.
# @param width Width of processed image.
# @param legs List of leg coordinates acquired from _get_legs.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Matrix with equal shape to label's containing weights.
den = 2 * sigma * sigma
weight_matrix = np.zeros([height, width], dtype=np.float32)
for y in range(height):
for x in range(width):
distances = _get_distances(y, x, legs)
if len(distances) == 0:
d1 = math.sqrt(
math.pow(width, 2) +
math.pow(height, 2)
) * 2
else:
d1 = min(distances)
weight = w0 * math.exp(-(math.pow(d1, 2))/(den))
weight_matrix[y, x] = weight
return weight_matrix
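# Added sanity check (arbitrary w0 and sigma, illustrative only; guarded so it never
# runs on import): at a leg pixel the distance d1 is zero, so the weight reduces to w0.
if __name__ == "__main__":
    _legs = [[2, 2]]
    _weights = _get_leg_weights_for_label(5, 5, _legs, w0=10.0, sigma=5.0)
    assert np.isclose(_weights[2, 2], 10.0)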
def _get_class_weights_for_label(label):
# @brief Get weight matrix to balance class inequality.
# @param label Label to generate weight matrix for.
# Return Weigh matrix with class weights.
white_pixels = np.count_nonzero(label)
total_pixels = label.shape[0] * label.shape[1]
black_weight = white_pixels / total_pixels
white_weight = 1.0 - black_weight
weight_matrix = np.where(label > 0, white_weight, black_weight)
return weight_matrix
def _get_weights_for_label(label, height, width, legs, w0, sigma):
# @brief Generate weight matrix for class equalizing and distance from legs.
# @param label Label to generate weights for.
# @param height Height of processed image.
# @param width Width of processed image.
# @param legs List of leg coordinates acquired from _get_legs.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Matrix with equal shape to label's containing weights.
class_weights = _get_class_weights_for_label(label)
leg_weights = _get_leg_weights_for_label(height, width, legs, w0, sigma)
return class_weights + leg_weights
def _generate_weights(train_labels, w0, sigma):
# @brief Generate weights for all labels.
# @param w0 Tuning parameter. See unet's paper for details.
# @param sigma Tuning parameter. See unet's paper for details.
# @return Numpy array with weight matrices.
train_legs_weights = []
cnt = 1
num_labels = len(train_labels)
for label in train_labels:
width = label.shape[2]
height = label.shape[1]
legs = _get_legs(label)
train_legs_weights.append(_get_weights_for_label(
label, height, width, legs, w0, sigma))
print("Processed sample %d of %d." % (cnt, num_labels))
cnt += 1
return np.array(train_legs_weights)
def _preprocess_inputs_labels(train_inputs, train_labels):
# @brief Preprocess inputs and labels from uint8 (0 - 255) to float32 (0 - 1).
# @param train_inputs Inputs to process.
# @param train_labels Labels to process.
# @return preprocessed inputs and labels.
train_inputs_processed = np.zeros(train_inputs.shape)
train_labels_processed = np.zeros(train_labels.shape)
num_labels = len(train_labels)
for i in range(len(train_inputs)):
input_sample = np.ndarray.astype(train_inputs[i], np.float32)
label_sample = | np.ndarray.astype(train_labels[i], np.float32) | numpy.ndarray.astype |
import os
import sys
import numpy as np
import re
import pyBigWig
def anchor (input,sample,ref): # input 1d array
sample.sort()
ref.sort()
# 0. create the mapping function
index=np.array(np.where(np.diff(sample)!=0))+1
index=index.flatten()
x=np.concatenate((np.zeros(1),sample[index])) # domain
y=np.zeros(len(x)) # codomain
for i in np.arange(0,len(index)-1,1):
start=index[i]
end=index[i+1]
y[i+1]=np.mean(ref[start:end])
i+=1
start=index[i]
end=len(ref)
y[i+1]=np.mean(ref[start:end])
# 1. interpolate
output=np.interp(input, x, y)
# no extrapolate - simply map to the ref max to remove extremely large values
# 2. extrapolate
# degree=1 # degree of the fitting polynomial
# num=10 # number of positions for extrapolate
# f1=np.poly1d(np.polyfit(sample[-num:],ref[-num:],degree))
# f2=np.poly1d(np.polyfit(sample[:num],ref[:num],degree))
# output[input>sample[-1]]=f1(input[input>sample[-1]])
# output[input<sample[0]]=f2(input[input<sample[0]])
return output
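# Added sketch (hypothetical arrays, guarded so it never runs on import): when the
# sample and reference distributions are identical, the anchoring map reduces to
# (approximately) the identity.
if __name__ == "__main__":
    _ref = np.arange(100, dtype=float)
    _sample = np.arange(100, dtype=float)
    _mapped = anchor(np.array([5.0, 50.0, 95.0]), _sample.copy(), _ref.copy())
    assert np.allclose(_mapped, [5.0, 50.0, 95.0])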
chr_all=['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX']
num_bp=[248956422,242193529,198295559,190214555,181538259,170805979,159345973,145138636,138394717,133797422,135086622,133275309,114364328,107043718,101991189,90338345,83257441,80373285,58617616,64444167,46709983,50818468,156040895]
chr_len={}
for i in np.arange(len(chr_all)):
chr_len[chr_all[i]]=num_bp[i]
path1='./bigwig/'
path2='./signal_anchored_final/'
os.system('mkdir -p ' + path2)
path3='./sample_for_anchor_final/'
assay_all=['M01','M02','M16','M17','M18','M20','M22','M29']
for i in | np.arange(1,52) | numpy.arange |
from src.models.model_abstract import ImageClassificationAbstract
import cv2
from skimage.feature import local_binary_pattern
from skimage.feature import hog
from scipy.stats import itemfreq
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from sklearn.svm import LinearSVC
import os
TARGET_SIZE = (400, 600)
AUGMENTED_SIZE = 200
class TypeClassificationModel(ImageClassificationAbstract):
def __init__(self, *args, **kwargs):
ImageClassificationAbstract.__init__(self, *args, **kwargs)
# Override Abstract Methods:
def train(self, image_paths_list):
# accepts list of image paths, trains model, stores trained model
x_data, y_data = self.get_x_y_data(image_paths_list, augment=False)
# Scale features
# x_data = self.x_scaler.fit_transform(x_data)
# Train model
model = LinearSVC(random_state=0, class_weight="balanced")
model.fit(x_data, y_data)
# Store trained model
self.set_model(model)
return None
def predict(self, image_paths_list):
# accepts list of image paths, returns predicted classes
x_data, y_data = self.get_x_y_data(image_paths_list, augment=False)
# Get predictions
predictions = self.get_model().predict(x_data)
return predictions
def get_x_y_data(self, image_paths_list, augment=False):
# TODO make preprocessing parallel, and explore storing and retrieving preprocessed images
# Load images
images_array = self.get_images_array(image_paths_list)
# Preprocess x_data
x_data = self.preprocess_images(images_array)
# Extract y_data
# Get file names from image paths
file_names = [os.path.basename(image_path) for image_path in image_paths_list]
# file_names = [image_path.split("/")[-1] for image_path in image_paths_list]
y_data = self.get_classes_array(file_names)
# Augment data
if augment:
x_data, y_data = self.created_augmented_data(x_data, y_data)
# Get features
features_list = []
for image in x_data:
hog_features = self.hog_feature_extractor(image)
# Get LBP features
lbp_features = self.lbp_feature_feature_extractor(image)
# Combine features
features = []
features.extend(hog_features)
features.extend(lbp_features)
features_list.append(features)
# Convert features_list to array
x_data = np.array(features_list)
assert len(x_data.shape) > 1, "Mismatching features lengths: %s" % [len(x) for x in x_data]
return x_data, y_data
@staticmethod
def get_classes_array(image_names_list):
# accepts image path, returns image classes
classes = []
for file_name in image_names_list:
classes.append(file_name.split("_")[0])
return np.array(classes)
@staticmethod
def preprocess_images(images_array):
# accepts images array, return preprocessed images array
image_list = list(images_array)
for i in range(len(images_array)):
image = image_list[i]
# # accepts images array, return preprocessed images array
image = cv2.resize(image, TARGET_SIZE)
# # Image Enhancement (future enhancement, working)
# # Crop to only the object
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# gray_blur = cv2.GaussianBlur(gray, (15, 15), 0)
# thresh = cv2.adaptiveThreshold(gray_blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 1)
# kernel = np.ones((5, 5), np.uint8)
# closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=1)
# cont_img = closing.copy()
# _, contours, hierarchy = cv2.findContours(cont_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# sorted_contours = sorted(contours, key=lambda x: cv2.contourArea(x))
# rect = cv2.minAreaRect(sorted_contours[-1])
# box = cv2.boxPoints(rect)
# box = np.int0(box)
# x1 = max(min(box[:, 0]), 0)
# y1 = max(min(box[:, 1]), 0)
# x2 = max(max(box[:, 0]), 0)
# y2 = max(max(box[:, 1]), 0)
#
# # Enhance
# image_cropped = image[y1:y2, x1:x2]
# lab = cv2.cvtColor(image_cropped, cv2.COLOR_BGR2LAB)
# l, a, b = cv2.split(lab)
# clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
# cl = clahe.apply(l)
# limg = cv2.merge((cl, a, b))
# image_cropped = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
# # Use fill
# image[y1:y2, x1:x2] = image_cropped
# # image = cv2.resize(image_cropped, TARGET_SIZE)
image_list[i] = image
return | np.array(image_list) | numpy.array |
""" methods for Maximum Likelihood analyses on IGM surveys
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import pdb
import numpy as np
from scipy.interpolate import interp1d
from scipy.stats import kstest
def powerlaw_loxz(lls_zabs, z_gz, gz, guess, zpivot, rnga=(-0.5,0.5), rngl=(-0.2,0.1), ngrid=100):
"""
Ported from SDSS LLS paper
Functional form l = k (1+z/1+zpivot)^a
Parameters
----------
lls_zabs
z_gz
gz
guess
zpivot
rnga
rngl
ngrid
Returns
-------
lik : ndarray
        Log-likelihood grid over (l*, alpha); not peak-normalized (a normalized copy is written to nrm_lik.fits)
lvec : ndarray
l* values for the grid
avec : ndarray
alpha values for the grid
"""
# Create vectors of alpha and l_0 values
lvec = guess[0] * 10.**(rngl[0] + (rngl[1]-rngl[0])*np.arange(ngrid)/(ngrid-1))
avec = guess[1] * 10.**(rnga[0] + (rnga[1]-rnga[0])*np.arange(ngrid)/(ngrid-1))
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#;; Positive term, i.e. h(z) evaluated at z_i
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#;; Evaluate z_i and sum
zi_nrm = (1+lls_zabs)/(1+zpivot)
sum_lnzi = np.sum(np.log(zi_nrm))
#;; Weight by alpha
asum_zi = avec * sum_lnzi
#;; lvec sum is trivial
nLLS = len(lls_zabs)
lterm = nLLS * np.log(lvec)
#;; Make the grids and add
lgrid = np.outer(lterm, np.ones(ngrid))
agrid = np.outer(np.ones(ngrid), asum_zi)
pos_grid = agrid + lgrid
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#;; Negative term :: Quasars
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#;; Alpha
nstep = len(gz)
dzstep = z_gz[1]-z_gz[0]
agrid = np.zeros((nstep, ngrid))
f_nrm = (1+z_gz)/(1+zpivot)
for kk in range(ngrid):
agrid[:,kk] = f_nrm**avec[kk]
#;; gofz
ggrid = np.outer(gz, np.ones(ngrid))
atot = dzstep * np.sum( agrid * ggrid, axis=0)
#; New grids
agrid = np.outer(np.ones(ngrid), atot)
lgrid = np.outer(lvec, np.ones(ngrid))
neg_grid = agrid * lgrid
#;;;;;;;;;;;;;;;;;;;;;;;;;;
#;; Likelihood
lik = pos_grid - neg_grid
maxL = np.max(lik)
nrm_lik = lik - maxL
# Write to disk
if True:
from astropy.io import fits
hdu = fits.PrimaryHDU(nrm_lik)
hdul = fits.HDUList([hdu])
hdul.writeto('nrm_lik.fits', overwrite=True)
# Unravel
ilmx, iamx = np.unravel_index(nrm_lik.argmax(), nrm_lik.shape)
print('Max: (l,a) = ', lvec[ilmx], avec[iamx])
# Return
return lik, lvec, avec
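# Added note (restating what the grid above computes): the maximized quantity is the
# log-likelihood of a power-law incidence l(z) = l* ((1+z)/(1+z_pivot))^alpha,
#     ln L(l*, alpha) = N_LLS * ln(l*) + alpha * sum_i ln((1+z_i)/(1+z_pivot))
#                       - l* * sum_z g(z) * ((1+z)/(1+z_pivot))^alpha * dzstep
# i.e. a Poisson-process likelihood in which the survey sensitivity g(z) sets the
# expected number of absorbers.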
def cl_indices(lnL, cl, sigma=False):
""" Find the indices of a log-Likelihood grid encompassing a
given confidence interval
Parameters:
lnL: np.array
      log-Likelihood image
    cl: float
      confidence level that the returned region should enclose (e.g. 0.68)
sigma: bool, optional
Return as sigma values [not implemented]
Returns:
indices: Tuple of np.where output
"""
# Max
mxL = | np.max(lnL) | numpy.max |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
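# Added usage sketch (hypothetical, guarded so it never runs on import): build the
# centrosymmetric group P -1 by hand and expand one reflection into its Friedel pair.
# The module-level definitions below construct the same groups programmatically.
if __name__ == "__main__":
    _identity = N.reshape(N.array([1, 0, 0, 0, 1, 0, 0, 0, 1]), (3, 3))
    _inversion = N.reshape(N.array([-1, 0, 0, 0, -1, 0, 0, 0, -1]), (3, 3))
    _no_shift = (N.array([0, 0, 0]), N.array([1, 1, 1]))
    _p1bar = SpaceGroup(2, 'P -1', [(_identity,) + _no_shift, (_inversion,) + _no_shift])
    _hkls, _phases = _p1bar.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
    # _hkls holds (1, 2, 3) and (-1, -2, -3); both phase factors are 1.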
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = | N.array([1,0,0,0,1,0,0,0,1]) | numpy.array |
import numpy as np
from numpy import save,load
import matplotlib
matplotlib.use('pdf')
matplotlib.rcParams['font.size'] = 17
matplotlib.rcParams['font.family'] = 'serif'
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pickle
from scipy.interpolate import interp1d
import scipy.sparse as sps
from scipy.sparse import linalg as sps_linalg
import scipy.linalg as scipy_linalg
from importlib import reload
from sklearn.cluster import KMeans,MiniBatchKMeans
from sklearn.decomposition import PCA
from abc import ABC,abstractmethod
import time
import queue
import sys
import os
from os.path import join,exists
codefolder = "/home/jf4241/dgaf2"
os.chdir(codefolder)
from hier_cluster_obj import nested_kmeans,nested_kmeans_predict_batch
# TODO: turn this all time-dependent later. For now, keep it time-homogeneous.
class Function(ABC):
# General kind of function that could be a neural network or a Gaussian process or a linear basis...
def __init__(self,function_type):
self.function_type = function_type # linear_basis, gaussian_process, neural_network
super().__init__()
return
@abstractmethod
def evaluate_function(self,data):
# This should be done after the parameters are set, or at least initialized
# X.shape = (Nx,xdim) (possibly from a flattened array)
pass
@abstractmethod
def feynman_kac_rhs(self,data,src_fun,dirn=1):
Nx,Nt,xdim = data.X.shape
HX = (src_fun(data.X.reshape((Nx*Nt,xdim)))).reshape((Nx,Nt))
R = np.zeros(Nx)
for i in range(Nt-1):
if dirn==1:
dt = data.t_x[i+1] - data.t_x[i]
if dt<0: sys.exit("Ummm dt<0 forward")
R -= 0.5*(HX[:,i] + HX[:,i+1]) * dt * (data.first_exit_idx > i)
else:
dt = data.t_x[-i-1] - data.t_x[-i-2]
if dt<0: sys.exit("Ummm dt<0 backward. i={}, data.t_x={}, data.t_x[-i]={}, data.t_x[-i-1]={}".format(i,data.t_x,data.t_x[-i],data.t_x[-i-1]))
R -= 0.5*(HX[:,-i-1] + HX[:,-i-2]) * dt * (data.last_entry_idx < data.traj_length-i)
return R
def feynman_kac_lhs_FX(self,data,unk_fun,unk_fun_dim):
# First part of the left-hand side is to compute just F itself. This is expensive! Afterward we get LF and VF easily
Nx,Nt,xdim = data.X.shape
#FX = unk_fun(data).reshape((Nx,Nt,unk_fun_dim))
FX = (unk_fun(data.X.reshape((Nx*Nt,xdim)))).reshape((Nx,Nt,unk_fun_dim))
return FX
def feynman_kac_lhs_LF(self,data,FX,dirn=1):
# Second part of the left-hand side is to compute LF. (The stopped version.)
Nx,Nt,xdim = data.X.shape
if dirn == 1:
LF = FX[np.arange(Nx),data.first_exit_idx] - FX[:,0]
else:
LF = FX[np.arange(Nx),data.last_entry_idx] - FX[:,-1]
return LF
def feynman_kac_lhs_VX(self,data,pot_fun):
Nx,Nt,xdim = data.X.shape
VX = (pot_fun(data.X.reshape((Nx*Nt,xdim)))).reshape((Nx,Nt))
return VX
def feynman_kac_lhs_VF(self,data,VX,FX,Fdim,dirn=1):
Nx,Nt,xdim = data.X.shape
VF = np.zeros((Nx,Fdim))
#print("About to compute VF, whose shape is {}".format(VF.shape))
for i in range(Nt-1):
if dirn==1:
dt = data.t_x[i+1] - data.t_x[i]
VF += 0.5*((VX[:,i]*FX[:,i].T + VX[:,i+1]*FX[:,i+1].T) * dt * (data.first_exit_idx > i)).T
else:
dt = data.t_x[-i-1] - data.t_x[-i-2]
VF += 0.5*((VX[:,-i-1]*FX[:,-i-1].T + VX[:,-i-2]*FX[:,-i-2].T) * dt * (data.last_entry_idx < data.traj_length-i)).T
return VF
def feynman_kac_lhs(self,data,unk_fun,unk_fun_dim,pot_fun,dirn=1):
FX = self.feynman_kac_lhs_FX(data,unk_fun,unk_fun_dim)
LF = self.feynman_kac_lhs_LF(data,FX,dirn=dirn)
VX = self.feynman_kac_lhs_VX(data,pot_fun)
VF = self.feynman_kac_lhs_VF(data,VX,FX,unk_fun_dim,dirn=dirn)
return LF,VF,FX
@abstractmethod
def fit_data(self,X,bdy_dist):
# This is implemented differently depending on the type, and may or may not be data-dependent
# 1. LinearBasis: define basis functions (e.g. form cluster centers for MSM, or axes for PCA)
# 2. GaussianProcess: define mean and covariance functions
# 3. NeuralNetwork: define architecture and activation functions
# In all cases, get the capability to evaluate new points
# bdy_dist is a function of x and t
#@Nx,Nt,xdim = data.X.shape
#@N = Nx*Nt
#@X = data.X.reshape((N,xdim))
N,xdim = X.shape
# Now cluster
bdy_dist_x = bdy_dist(X)
bdy_idx = np.where(bdy_dist_x==0)[0]
iidx = np.setdiff1d(np.arange(N),bdy_idx)
print("len(bdy_idx) = {}".format(len(bdy_idx)))
print("len(iidx) = {}".format(len(iidx)))
self.fit_data_flat(X,iidx,bdy_idx,bdy_dist_x)
return
@abstractmethod
def fit_data_flat(self,X,iidx,bdy_idx,bdy_dist_x):
pass
@abstractmethod
def solve_boundary_value_problem(self,data,bdy_dist,bdy_fun):
# Solve the boundary value problem given a dataset of short trajectories (perhaps with different lengths and resolutions) by optimizing parameters.
# 1. LinearBasis: invert the matrix of basis function evaluations
# 2. GaussianProcess: invert the correlation matrix to derive the posterior mean and covariance
# 3. NeuralNetwork: optimize weights with SGD of some kind
pass
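# --- Illustrative sketch (added commentary): stopped trapezoidal accumulation ---
# feynman_kac_rhs above accumulates -int_0^tau h(X_t) dt along each trajectory,
# where tau is the first exit time from the domain. A standalone version of
# that accumulation (hypothetical helper, not used by the classes below) could
# look like this, assuming HX has shape (Nx, Nt), t has shape (Nt,), and
# first_exit_idx has shape (Nx,):
def _stopped_trapezoid_example(HX, t, first_exit_idx):
    Nx, Nt = HX.shape
    R = np.zeros(Nx)
    for i in range(Nt - 1):
        dt = t[i+1] - t[i]
        # only trajectories that have not yet exited contribute on this step
        R -= 0.5 * (HX[:, i] + HX[:, i+1]) * dt * (first_exit_idx > i)
    return R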
# Now implement the various instances
class LinearBasis(Function):
def __init__(self,basis_size,basis_type):
self.basis_size = basis_size
self.basis_type = basis_type
super().__init__('LinearBasis')
def feynman_kac_rhs(self,*args,**kwargs):
return super().feynman_kac_rhs(*args,**kwargs)
def feynman_kac_lhs(self,*args,**kwargs):
return super().feynman_kac_lhs(*args,**kwargs)
def evaluate_function(self,data):
# First evaluate the basis functions, then sum them together according to the coefficients (which have already been determined when this function is called)
Nx,Nt,xdim = data.X.shape
F = (self.bdy_fun(data.X.reshape((Nx*Nt,xdim)))).reshape((Nx,Nt))
F += self.evaluate_basis_functions(data).dot(self.coeffs)
return F
@abstractmethod
def fit_data(self,data,bdy_dist):
return super().fit_data(data,bdy_dist)
def fit_data_flat(self,X,iidx,bdy_idx,bdy_dist_x):
pass
def compute_stationary_density(self,data):
# Keep it simple, stupid
bdy_dist = lambda x: np.ones(len(x)) # no boundaries
data.insert_boundaries(bdy_dist)
pot_fun = lambda x: np.zeros(len(x))
def unk_fun(X):
return self.evaluate_basis_functions(X,bdy_dist,const_fun_flag=True)
print("About to compute the Feynman-Kac LHS")
Lphi,Vphi,phi = self.feynman_kac_lhs(data,unk_fun,self.basis_size,pot_fun)
phi_Lphi = phi[:,0].T.dot(Lphi)
#sys.exit("phi_Lphi.dot(1) = {}".format(phi_Lphi.dot(np.ones(phi_Lphi.shape[1]))))
Q,R = | np.linalg.qr(phi_Lphi,mode='complete') | numpy.linalg.qr |
import numpy as np
from pyFAI.multi_geometry import MultiGeometry
from pyFAI.ext import splitBBox
def inpaint_saxs(imgs, ais, masks):
"""
Inpaint the 2D image collected by the pixel detector to remove artifacts in later data reduction
Parameters:
-----------
:param imgs: List of 2D image in pixel
:type imgs: ndarray
:param ais: List of AzimuthalIntegrator/Transform generated using pyGIX/pyFAI which contain the information about the experiment geometry
:type ais: list of AzimuthalIntegrator / TransformIntegrator
:param masks: List of 2D images (same dimensions as imgs)
:type masks: ndarray
"""
inpaints, mask_inpaints = [], []
for i, (img, ai, mask) in enumerate(zip(imgs, ais, masks)):
inpaints.append(ai.inpainting(img.copy(order='C'),
mask))
mask_inpaints.append(np.logical_not(np.ones_like(mask)))
return inpaints, mask_inpaints
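# --- Hypothetical usage sketch (added commentary) ---
# With matching lists of detector frames, pyFAI integrators and boolean masks
# (the variable names below are placeholders):
#
#     inpaints, inpaint_masks = inpaint_saxs(imgs, ais, masks)
#
# The returned masks are all-False arrays, so every inpainted pixel is treated
# as valid by the integration helpers defined below.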
def cake_saxs(inpaints, ais, masks, radial_range=(0, 60), azimuth_range=(-90, 90), npt_rad=250, npt_azim=250):
"""
Unwrap the stitched image into radial-azimuthal (q, chi) space
Parameters:
-----------
:param inpaints: List of 2D inpainted images
:type inpaints: List of ndarray
:param ais: List of AzimuthalIntegrator/Transform generated using pyGIX/pyFAI which contain the information about the experiment geometry
:type ais: list of AzimuthalIntegrator / TransformIntegrator
:param masks: List of 2D images (same dimensions as inpaints)
:type masks: List of ndarray
:param radial_range: minimum and maximum of the radial (q) range in A-1
:type radial_range: Tuple
:param azimuth_range: minimum and maximum of the azimuthal (chi) range in degrees
:type azimuth_range: Tuple
:param npt_rad: number of point in the radial range
:type npt_rad: int
:param npt_azim: number of point in the azimuthal range
:type npt_azim: int
"""
mg = MultiGeometry(ais,
unit='q_A^-1',
radial_range=radial_range,
azimuth_range=azimuth_range,
wavelength=None,
empty=0.0,
chi_disc=180)
cake, q, chi = mg.integrate2d(lst_data=inpaints,
npt_rad=npt_rad,
npt_azim=npt_azim,
correctSolidAngle=True,
lst_mask=masks)
return cake, q, chi[::-1]
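# --- Hypothetical usage sketch (added commentary) ---
# The q and chi ranges below are placeholders; radial_range is in A-1 because
# the MultiGeometry is built with unit='q_A^-1'.
def _example_cake(inpaints, ais, inpaint_masks):
    cake, q, chi = cake_saxs(inpaints, ais, inpaint_masks,
                             radial_range=(0, 3), azimuth_range=(-90, 90),
                             npt_rad=500, npt_azim=360)
    return cake, q, chi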
def integrate_rad_saxs(inpaints, ais, masks, radial_range=(0, 40), azimuth_range=(0, 90), npt=2000):
"""
Radial integration of transmission data using the pyFAI multigeometry module
Parameters:
-----------
:param inpaints: List of 2D inpainted images
:type inpaints: List of ndarray
:param ais: List of AzimuthalIntegrator/Transform generated using pyGIX/pyFAI which contain the information about the experiment geometry
:type ais: list of AzimuthalIntegrator / TransformIntegrator
:param masks: List of 2D images (same dimensions as inpaints)
:type masks: List of ndarray
:param radial_range: minimum and maximum of the radial (q) range in A-1
:type radial_range: Tuple
:param azimuth_range: minimum and maximum of the azimuthal (chi) range in degrees
:type azimuth_range: Tuple
:param npt: number of point of the final 1D profile
:type npt: int
"""
mg = MultiGeometry(ais,
unit='q_A^-1',
radial_range=radial_range,
azimuth_range=azimuth_range,
wavelength=None,
empty=-1,
chi_disc=180)
q, i_rad = mg.integrate1d(lst_data=inpaints,
npt=npt,
correctSolidAngle=True,
lst_mask=masks)
return q, i_rad
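# --- Hypothetical usage sketch (added commentary) ---
# A 1D transmission profile over a chosen azimuthal wedge; all ranges are
# placeholders.
def _example_radial_profile(inpaints, ais, inpaint_masks):
    q, i_rad = integrate_rad_saxs(inpaints, ais, inpaint_masks,
                                  radial_range=(0, 3), azimuth_range=(0, 90),
                                  npt=2000)
    return q, i_rad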
def integrate_azi_saxs(cake, q_array, chi_array, radial_range=(0, 10), azimuth_range=(-90, 0)):
"""
Azimuthal integration of transmission data using a masked array on a caked image (intensity in q-chi space)
Parameters:
-----------
:param cake: 2D array of intensity unwrapped in q-chi space
:type cake: ndarray of shape (len(chi_array), len(q_array))
:param q_array: 1D array containing the q value of each column of the cake
:type q_array: ndarray
:param chi_array: 1D array containing the chi angle (in degrees) of each row of the cake
:type chi_array: ndarray
:param radial_range: minimum and maximum of the radial (q) range in A-1
:type radial_range: Tuple
:param azimuth_range: minimum and maximum of the azimuthal (chi) range in degrees
:type azimuth_range: Tuple
"""
q_mesh, chi_mesh = np.meshgrid(q_array, chi_array)
cake_mask = np.ma.masked_array(cake)
cake_mask = np.ma.masked_where(q_mesh < radial_range[0], cake_mask)
cake_mask = np.ma.masked_where(q_mesh > radial_range[1], cake_mask)
cake_mask = np.ma.masked_where(azimuth_range[0] > chi_mesh, cake_mask)
cake_mask = np.ma.masked_where(azimuth_range[1] < chi_mesh, cake_mask)
i_azi = cake_mask.mean(axis=1)
return chi_array, i_azi
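# --- Hypothetical usage sketch (added commentary) ---
# I(chi) averaged over a narrow q band of the cake produced by cake_saxs;
# the q band (0.05-0.15 A-1) is a placeholder.
def _example_azimuthal_profile(cake, q, chi):
    chi_out, i_azi = integrate_azi_saxs(cake, q, chi,
                                        radial_range=(0.05, 0.15),
                                        azimuth_range=(-90, 90))
    return chi_out, i_azi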
def integrate_rad_gisaxs(img, q_par, q_per, bins=1000, radial_range=None, azimuth_range=None):
"""
Radial integration of Grazing incidence data using the pyFAI multigeometry module
Parameters:
-----------
:param q_par: minimum and maximum q_par (in A-1) of the input image
:type q_par: Tuple
:param q_per: minimum and maximum of q_par in A-1
:type q_per: Tuple
:param bins: number of point of the final 1D profile
:type bins: int
:param img: 2D array containing the stitched intensity
:type img: ndarray
:param radial_range: radial q range (in A-1) over which the integration will be done
:type radial_range: Tuple
:param azimuth_range: azimuthal (chi) range over which the integration will be done
:type azimuth_range: Tuple
"""
# recalculate the q-range of the input array
q_h = np.linspace(q_par[0], q_par[-1], np.shape(img)[1])
q_v = np.linspace(q_per[0], q_per[-1], np.shape(img)[0])[::-1]
if radial_range is None:
radial_range = (0, q_h.max())
if azimuth_range is None:
azimuth_range = (0, q_v.max())
q_h_te, q_v_te = np.meshgrid(q_h, q_v)
tth_array = np.sqrt(q_h_te ** 2 + q_v_te ** 2)
chi_array = np.rad2deg(np.arctan2(q_h_te, q_v_te))
# Mask the remeshed array
img_mask = np.ma.masked_array(img, mask=img == 0)
img_mask = np.ma.masked_where(img < 1E-5, img_mask)
img_mask = np.ma.masked_where(tth_array < radial_range[0], img_mask)
img_mask = np.ma.masked_where(tth_array > radial_range[1], img_mask)
img_mask = np.ma.masked_where(chi_array < np.min(azimuth_range), img_mask)
img_mask = np.ma.masked_where(chi_array > np.max(azimuth_range), img_mask)
q_rad, i_rad, _, _ = splitBBox.histoBBox1d(img_mask,
pos0=tth_array,
delta_pos0=np.ones_like(img_mask) * (q_par[1] - q_par[0])/np.shape(
img_mask)[1],
pos1=q_v_te,
delta_pos1=np.ones_like(img_mask) * (q_per[1] - q_per[0])/np.shape(
img_mask)[0],
bins=bins,
pos0Range=np.array([np.min(tth_array), np.max(tth_array)]),
pos1Range=q_per,
dummy=None,
delta_dummy=None,
mask=img_mask.mask
)
return q_rad, i_rad
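# --- Hypothetical usage sketch (added commentary) ---
# Pseudo-radial profile of a remeshed grazing-incidence image; `img` is the
# stitched intensity map and q_par/q_per are its (min, max) bounds in A-1.
# Note that in the implementation above azimuth_range is compared against chi
# in degrees, so (0, 90) is used here; all values are placeholders.
def _example_gisaxs_radial(img, q_par, q_per):
    q_rad, i_rad = integrate_rad_gisaxs(img, q_par, q_per, bins=800,
                                        radial_range=(0, 2),
                                        azimuth_range=(0, 90))
    return q_rad, i_rad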
def integrate_qpar(img, q_par, q_per, q_par_range=None, q_per_range=None):
"""
Horizontal integration of a 2D array using masked array
Parameters:
-----------
:param q_par: minimum and maximum q_par (in A-1) of the input image
:type q_par: Tuple
:param q_per: minimum and maximum of q_par in A-1
:type q_per: Tuple
:param img: 2D array containing intensity
:type img: ndarray
:param q_par_range: q_par range (in A-1) over which the integration will be done
:type q_par_range: Tuple
:param q_per_range: q_per range (in A-1) over which the integration will be done
:type q_per_range: Tuple
"""
if q_par_range is None:
q_par_range = (np.asarray(q_par).min(), np.asarray(q_par).max())
if q_per_range is None:
q_per_range = (np.asarray(q_per).min(), np.asarray(q_per).max())
q_par = np.linspace(q_par[0], q_par[1], np.shape(img)[1])
q_per = np.linspace(q_per[0], q_per[1], np.shape(img)[0])[::-1]
qpar_mesh, qper_mesh = np.meshgrid(q_par, q_per)
img_mask = np.ma.masked_array(img, mask=img == 0)
img_mask = np.ma.masked_where(qper_mesh < q_per_range[0], img_mask)
img_mask = np.ma.masked_where(qper_mesh > q_per_range[1], img_mask)
img_mask = np.ma.masked_where(q_par_range[0] > qpar_mesh, img_mask)
img_mask = np.ma.masked_where(q_par_range[1] < qpar_mesh, img_mask)
i_par = np.mean(img_mask, axis=0)
return q_par, i_par
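# --- Hypothetical usage sketch (added commentary) ---
# Horizontal line cut averaged over a narrow q_per band; the band values are
# placeholders.
def _example_qpar_cut(img, q_par, q_per):
    q_h, i_par = integrate_qpar(img, q_par, q_per,
                                q_per_range=(0.02, 0.06))
    return q_h, i_par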
def integrate_qper(img, q_par, q_per, q_par_range=None, q_per_range=None):
"""
Vertical integration of a 2D array using masked array
Parameters:
-----------
:param q_par: minimum and maximum q_par (in A-1) of the input image
:type q_par: Tuple
:param q_per: minimum and maximum of q_par in A-1
:type q_per: Tuple
:param img: 2D array containing intensity
:type img: ndarray
:param q_par_range: q_par range (in A-1) over which the integration will be done
:type q_par_range: Tuple
:param q_per_range: q_per range (in A-1) over which the integration will be done
:type q_per_range: Tuple
"""
if q_par_range is None:
q_par_range = ( | np.asarray(q_par) | numpy.asarray |
"""
Main interface module to use pyEPR.
Contains code to connect to Ansys and to analyze HFSS files using the EPR method.
Also contains code for automated reports and analysis.
Copyright <NAME>, <NAME>, and the pyEPR team
2015, 2016, 2017, 2018, 2019, 2020
"""
from __future__ import print_function # Python 2.7 and 3 compatibility
import os
import sys
import time
import pickle
import shutil
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
# Standard imports
from numpy import pi
from numpy.linalg import inv
from stat import S_ISREG, ST_CTIME, ST_MODE
from pandas import Series, DataFrame
from collections import OrderedDict
from pathlib import Path
# pyEPR custom imports
from . import ansys
from . import logger
from . import config
from . import Dict
from .ansys import ureg, CalcObject, ConstantVecCalcObject, set_property
from .toolbox.pythonic import print_NoNewLine, print_color, deprecated, fact, nck, \
divide_diagonal_by_2, print_matrix, DataFrame_col_diff, get_instance_vars,\
sort_df_col, sort_Series_idx
from .calcs.constants import epsilon_0, hbar, Planck, fluxQ
from .calcs.basic import CalcsBasic
from .calcs.back_box_numeric import bbq_hmt, make_dispersive
from .toolbox.plotting import cmap_discrete, legend_translucent
from .reports import plot_convergence_f_vspass, plot_convergence_max_df,\
plot_convergence_solved_elem, plot_convergence_maxdf_vs_sol
class Project_Info(object):
"""
Class containing options and information about the manipulation and analysis in HFSS.
Junction info:
-----------------------
self.junctions : OrderedDict()
A Josephson tunnel junction has to have its parameters specified here for the analysis.
Each junction is given a name and is specified by a dictionary.
It has the following properties:
`Lj_variable`: Name of HFSS variable that specifies junction inductance Lj defined
on the boundary condition in HFSS. DO NOT USE Global names that start
with $.
`rect`: Name of HFSS rectangle on which lumped boundary condition is specified.
`line`: Name of HFSS polyline which spans the length of the rectangle.
Used to define the voltage across the junction.
Used to define the current orientation for each junction.
Used to define sign of ZPF.
`length`: Length in HFSS of the junction rectangle and line
(specified in meters). You can use epr.parse_units('100um')
Example definition:
.. code-block:: python
# Define a single junction
pinfo = Project_Info('')
pinfo.junctions['j1'] = {'Lj_variable' : 'Lj1',
'rect' : 'JJrect1',
'line' : 'JJline1',
'length' : parse_units('50um')} # Length is in meters
# Specify multiple junctions in HFSS model
n_junctions = 5
for i in range(1, 1+n_junctions):
pinfo.junctions[f'j{i}'] = {'Lj_variable' : f'Lj{i}',
'rect' : f'JJrect{i}',
'line' : f'JJline{i}',
'length' : parse_units('50um')}
HFSS app connection settings
-----------------------
project_path : str
Directory path to the hfss project file. Should be the directory, not the file.
default = None: Assumes the project is open, and thus gets the project based
on `project_name`
project_name : str, None
Name of the project within the project_path. "None" will get the current active one.
design_name : str, None
Name of the design within the project. "None" will get the current active one.
setup_name : str, None
Name of the setup within the design. "None" will get the current active one.
Additional init setting:
-----------------------
do_connect : True by default. Connect to HFSS
HFSS design settings
-----------------------
describe junction parameters
junc_rects = None
Name of junction rectangles in HFSS
junc_lines = None
Name of lines in HFSS used to define the current orientation for each junction
junc_LJ_names = None
Name of junction inductance variables in HFSS.
Note, DO NOT USE Global names that start with $.
junc_lens = None
Junction rect. length, measured in meters.
"""
class _Dissipative:
#TODO: remove and turn to dict
def __init__(self):
self.dielectrics_bulk = None
self.dielectric_surfaces = None
self.resistive_surfaces = None
self.seams = None
def __init__(self, project_path=None, project_name=None, design_name=None,
setup_name=None, do_connect=True):
# Path: format path correctly to system convention
self.project_path = str(Path(project_path)) \
if not (project_path is None) else None
self.project_name = project_name
self.design_name = design_name
self.setup_name = setup_name
        ## HFSS design: describe junction parameters
# TODO: introduce modal labels
self.junctions = Dict() # See above for help
self.ports = Dict()
## Dissipative HFSS volumes and surfaces
self.dissipative = self._Dissipative()
self.options = config.ansys
        # Connected to HFSS variables
self.app = None
self.desktop = None
self.project = None
self.design = None
self.setup = None
if do_connect:
self.connect()
_Forbidden = ['app', 'design', 'desktop', 'project',
'dissipative', 'setup', '_Forbidden', 'junctions']
def save(self):
'''
        Return a dictionary to save
'''
return dict(
pinfo=pd.Series(get_instance_vars(self, self._Forbidden)),
dissip=pd.Series(get_instance_vars(self.dissipative)),
options=pd.Series(get_instance_vars(self.options)),
junctions=pd.DataFrame(self.junctions),
ports=pd.DataFrame(self.ports),
)
@deprecated
def connect_to_project(self):
return self.connect()
def connect(self):
"""
Connect to Ansys Desktop.
"""
logger.info('Connecting to Ansys Desktop API...')
self.app, self.desktop, self.project = ansys.load_ansys_project(
self.project_name, self.project_path)
self.project_name = self.project.name
self.project_path = self.project.get_path()
### Design
if self.design_name is None:
self.design = self.project.get_active_design()
self.design_name = self.design.name
logger.info(f'\tOpened active design\n\
\tDesign: {self.design_name} [Solution type: {self.design.solution_type}]')
else:
try:
self.design = self.project.get_design(self.design_name)
logger.info(f'\tOpened active design\n\
\tDesign: {self.design_name} [Solution type: {self.design.solution_type}]')
except Exception as e:
tb = sys.exc_info()[2]
logger.error(f"Original error \N{loudly crying face}: {e}\n")
raise(Exception(' Did you provide the correct design name?\
Failed to pull up design. \N{loudly crying face}').with_traceback(tb))
### Setup
try:
setup_names = self.design.get_setup_names()
if len(setup_names) == 0:
logger.warning('\tNo design setup detected.')
if self.design.solution_type == 'Eigenmode':
                    logger.warning('\tCreating a default eigenmode setup.')
setup = self.design.create_em_setup()
self.setup_name = setup.name
elif self.design.solution_type == 'DrivenModal':
setup = self.design.create_dm_setup() # adding a driven modal design
self.setup_name = setup.name
            elif self.setup_name is None:
self.setup_name = setup_names[0]
elif self.setup_name not in setup_names:
logger.error(f"\tSetup does not exist.")
raise(Exception(' Did you provide the correct setup name?\
Failed to pull up setup. \N{loudly crying face}'))
self.get_setup(self.setup_name) # get the actual setup if there is one
except Exception as e:
tb = sys.exc_info()[2]
logger.error(f"Original error \N{loudly crying face}: {e}\n")
raise(Exception(' Did you provide the correct setup name?\
Failed to pull up setup. \N{loudly crying face}').with_traceback(tb))
# Finalize
self.project_name = self.project.name
self.design_name = self.design.name
logger.info('\tConnection to Ansys established successfully. \N{grinning face} \n')
return self
def get_setup(self, name):
"""
Connects to a specific setup for the design.
Sets self.setup and self.setup_name.
        If name is None, returns None without setting the setup.
"""
if name is None:
return None
else:
            self.setup = self.design.get_setup(name=name)
if self.setup is None:
logger.error(f"Could not retrieve setup: {self.setup_name}\n \
Did you give the right name? Does it exist?")
self.setup_name = self.setup.name
logger.info(f'\tOpened setup `{self.setup_name}` ({type(self.setup)})')
return self.setup
def check_connected(self):
"""Checks if fully connected including setup
"""
return\
(self.setup is not None) and\
(self.design is not None) and\
(self.project is not None) and\
(self.desktop is not None) and\
(self.app is not None)
def disconnect(self):
'''
Disconnect from existing HFSS design.
'''
assert self.check_connected() is True,\
"It does not appear that you have connected to HFSS yet.\
Use the connect() method. \N{nauseated face}"
self.project.release()
self.desktop.release()
self.app.release()
ansys.release()
### UTILITY FUNCTIONS
    def get_dm(self):
        '''
        Utility shortcut function to get the design and modeler.
        .. code-block:: python
            oDesign, oModeler = project.get_dm()
        '''
        return self.design, self.design.modeler
def get_all_variables_names(self):
"""Returns array of all project and local design names."""
return self.project.get_variable_names() + self.design.get_variable_names()
def get_all_object_names(self):
"""Returns array of strings"""
oObjects = []
for s in ["Non Model", "Solids", "Unclassified", "Sheets", "Lines"]:
oObjects += self.design.modeler.get_objects_in_group(s)
return oObjects
def validate_junction_info(self):
""" Validate that the user has put in the junction info correctly.
            Do not forget to also check the length of the rectangles/lines of
the junction if you change it.
"""
all_variables_names = self.get_all_variables_names()
all_object_names = self.get_all_object_names()
for jjnm, jj in self.junctions.items():
assert jj['Lj_variable'] in all_variables_names,\
"""pyEPR project_info user error found \N{face with medical mask}:
Seems like for junction `%s` you specified a design or project
variable for `Lj_variable` that does not exist in HFSS by the name:
`%s` """ % (jjnm, jj['Lj_variable'])
for name in ['rect', 'line']:
assert jj[name] in all_object_names, \
"""pyEPR project_info user error found \N{face with medical mask}:
Seems like for junction `%s` you specified a %s that does not exist
in HFSS by the name: `%s` """ % (jjnm, name, jj[name])
        #TODO: Check the length of the rectangle
#==============================================================================
#%% Main computation class & interface with HFSS
#==============================================================================
class pyEPR_HFSSAnalysis(object):
"""
This class defines a pyEPR_HFSSAnalysis object which calculates and saves
Hamiltonian parameters from an HFSS simulation.
    Further, it allows one to calculate dissipation, etc.
"""
def __init__(self, *args, **kwargs):
'''
Parameters:
-------------------
project_info : Project_Info
                Supply the project info or the parameters to create pinfo
Example use:
-------------------
'''
if (len(args) == 1) and (args[0].__class__.__name__ == 'Project_Info'):
            #isinstance(args[0], Project_Info): # fails on module reload with changes
project_info = args[0]
else:
            assert len(args) == 0, '''Since you did not pass a Project_Info object
                as an argument, we now assume you are trying to create a project
                info object here by passing its arguments. See Project_Info.
                It does not take any positional arguments, only kwargs. \N{face with medical mask}'''
project_info = Project_Info(*args, **kwargs)
# Input
self.pinfo = project_info
if self.pinfo.check_connected() is False:
self.pinfo.connect()
self.verbose = True #TODO: change verbose to logger. remove verbose flags
self.append_analysis = False
# hfss connect module
self.fields = None
self.solutions = None
if self.setup:
self.fields = self.setup.get_fields()
self.solutions = self.setup.get_solutions()
        # Stores results from sims
self.results = Dict() # of variations. Saved results
        self.hfss_variables = Dict()  # container for eBBQ list of variables
# Variations - the following get updated in update_variation_information
self.nmodes = int(1)
self.listvariations = ("",)
self.nominalvariation = '0'
self.nvariations = 0
self.update_variation_information()
if self.verbose:
print('Design \"%s\" info:'%self.design.name)
print('\t%-15s %d\n\t%-15s %d' %('# eigenmodes', self.nmodes, \
'# variations', self.nvariations))
# Setup data saving
self.data_dir = None
self.file_name = None
self.setup_data()
@property
def setup(self):
return self.pinfo.setup
@property
def design(self):
return self.pinfo.design
@property
def project(self):
return self.pinfo.project
@property
def desktop(self):
return self.pinfo.desktop
@property
def app(self):
return self.pinfo.app
@property
def junctions(self):
return self.pinfo.junctions
@property
def ports(self):
return self.pinfo.ports
@property
def options(self):
return self.pinfo.options
def setup_data(self):
'''
Set up folder paths for saving data to.
Sets the save filename with the current time.
Saves to Path(config.root_dir) / self.project.name / self.design.name
'''
if len(self.design.name) > 50:
logger.error('WARNING! DESIGN FILENAME MAY BE TOO LONG! ')
self.data_dir = Path(config.root_dir) / self.project.name / self.design.name
self.data_filename = self.data_dir / (time.strftime(config.save_format, \
time.localtime()) + '.npz')
if not self.data_dir.is_dir():
self.data_dir.mkdir(parents=True, exist_ok=True)
def calc_p_junction_single(self, mode):
'''
This function is used in the case of a single junction only.
For multiple junctions, see `calc_p_junction`.
Assumes no lumped capacitive elements.
'''
pj = OrderedDict()
pj_val = (self.U_E-self.U_H)/self.U_E
pj['pj_'+str(mode)] = np.abs(pj_val)
print(' p_j_' + str(mode) + ' = ' + str(pj_val))
return pj
    #TODO: replace this method with the one below; kept here because some funcs still use it
def get_freqs_bare(self, variation):
"""Outdated. Do not use. To be depreicated
Arguments:
variation {[str]} -- [description]
Returns:
[type] -- [description]
"""
#str(self.get_lv(variation))
freqs_bare_vals = []
freqs_bare_dict = OrderedDict()
freqs, kappa_over_2pis = self.solutions.eigenmodes(
self.get_lv_EM(variation))
for m in range(self.nmodes):
freqs_bare_dict['freq_bare_'+str(m)] = 1e9*freqs[m]
freqs_bare_vals.append(1e9*freqs[m])
if kappa_over_2pis is not None:
freqs_bare_dict['Q_'+str(m)] = freqs[m]/kappa_over_2pis[m]
else:
freqs_bare_dict['Q_'+str(m)] = 0
self.freqs_bare = freqs_bare_dict
self.freqs_bare_vals = freqs_bare_vals
return freqs_bare_dict, freqs_bare_vals
def get_freqs_bare_pd(self, variation:str):
"""Return the freq and Qs of the solved modes for a variation
Arguments:
variation {[str]} -- Index of variation
Returns:
Fs, Qs -- Tuple of pandas.Series objects.
the row index is the mode number
"""
freqs, kappa_over_2pis = self.solutions.eigenmodes(
self.get_lv_EM(variation))
if kappa_over_2pis is None:
kappa_over_2pis = np.zeros(len(freqs))
freqs = pd.Series(freqs, index=range(len(freqs))) # GHz
Qs = freqs / pd.Series(kappa_over_2pis, index=range(len(freqs)))
return freqs, Qs
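    # Example usage (a hedged, illustrative sketch, kept as a comment so it is not
    # executed at import time). Assumes `epr_hfss = pyEPR_HFSSAnalysis(pinfo)` is
    # connected to a solved eigenmode setup:
    #
    #   Fs, Qs = epr_hfss.get_freqs_bare_pd('0')   # variation index given as a string
    #   print(Fs)   # pandas Series of mode frequencies (GHz), indexed by mode number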
def get_lv(self, variation=None):
'''
List of variation variables.
Returns list of var names and var values.
Such as ['Lj1:=','13nH', 'QubitGap:=','100um']
Parameters
-----------
variation : string number such as '0' or '1' or ...
'''
if variation is None:
lv = self.nominalvariation
lv = self.parse_listvariations(lv)
else:
lv = self.listvariations[ureg(variation)]
lv = self.parse_listvariations(lv)
return lv
def get_lv_EM(self, variation):
if variation is None:
lv = self.nominalvariation
#lv = self.parse_listvariations_EM(lv)
else:
lv = self.listvariations[ureg(variation)]
#lv = self.parse_listvariations_EM(lv)
return str(lv)
def parse_listvariations_EM(self, lv):
lv = str(lv)
lv = lv.replace("=", ":=,")
lv = lv.replace(' ', ',')
lv = lv.replace("'", "")
lv = lv.split(",")
return lv
def parse_listvariations(self, lv):
lv = str(lv)
lv = lv.replace("=", ":=,")
lv = lv.replace(' ', ',')
lv = lv.replace("'", "")
lv = lv.split(",")
return lv
def get_variables(self, variation=None):
lv = self.get_lv(variation)
variables = OrderedDict()
for ii in range(int(len(lv)/2)):
variables['_'+lv[2*ii][:-2]] = lv[2*ii+1]
self.variables = variables
return variables
def calc_energy_electric(self,
variation=None,
volume='AllObjects',
smooth=False):
r'''
Calculates two times the peak electric energy, or 4 times the RMS,
:math:`4*\mathcal{E}_{\mathrm{elec}}`
(since we do not divide by 2 and use the peak phasors).
.. math::
\mathcal{E}_{\mathrm{elec}}=\frac{1}{4}\mathrm{Re}\int_{V}\mathrm{d}v\vec{E}_{\text{max}}^{*}\overleftrightarrow{\epsilon}\vec{E}_{\text{max}}
volume : string | 'AllObjects'
smooth : bool | False
Smooth the electric field or not when performing calculation
        Example use to calculate the energy participation of a substrate
.. code-block python
ℰ_total = epr_hfss.calc_energy_electric(volume='AllObjects')
ℰ_substr = epr_hfss.calc_energy_electric(volume='Box1')
print(f'Energy in substrate = {100*ℰ_substr/ℰ_total:.1f}%')
'''
calcobject = CalcObject([], self.setup)
vecE = calcobject.getQty("E")
if smooth:
vecE = vecE.smooth()
A = vecE.times_eps()
B = vecE.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_vol(name=volume)
lv = self.get_lv(variation)
return A.evaluate(lv=lv)
def calc_energy_magnetic(self,
variation=None,
volume='AllObjects',
smooth=False):
'''
See calc_energy_electric
'''
calcobject = CalcObject([], self.setup)
vecH = calcobject.getQty("H")
if smooth:
vecH = vecH.smooth()
A = vecH.times_mu()
B = vecH.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_vol(name=volume)
lv = self.get_lv(variation)
return A.evaluate(lv=lv)
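    # Illustrative sketch (comment only, not part of the library API): for a
    # single-junction design with only lumped inductors, the junction participation
    # of the currently set mode follows from the global energies, as in
    # calc_p_junction_single above:
    #
    #   U_E = epr_hfss.calc_energy_electric(variation='0')
    #   U_H = epr_hfss.calc_energy_magnetic(variation='0')
    #   p_j = (U_E - U_H) / U_E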
def calc_p_electric_volume(self,
name_dielectric3D,
relative_to='AllObjects',
E_total=None
):
r'''
        Calculate the dielectric energy-participation ratio
        of a 3D object (one that has volume) relative to the dielectric energy of
        a list of objects.
This is as a function relative to another object or all objects.
When all objects are specified, this does not include any energy
that might be stored in any lumped elements or lumped capacitors.
Returns:
---------
            ℰ_object/ℰ_total, (ℰ_object, ℰ_total)
'''
if E_total is None:
logger.debug('Calculating ℰ_total')
ℰ_total = self.calc_energy_electric(volume=relative_to)
else:
ℰ_total = E_total
logger.debug('Calculating ℰ_object')
ℰ_object = self.calc_energy_electric(volume=name_dielectric3D)
return ℰ_object/ℰ_total, (ℰ_object, ℰ_total)
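    # Example (hedged sketch; the volume name 'Box1' is hypothetical):
    #
    #   p_substrate, (E_sub, E_tot) = epr_hfss.calc_p_electric_volume('Box1')
    #   print(f'Substrate participation = {100*p_substrate:.1f}%')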
def calc_current(self, fields, line):
'''
Function to calculate Current based on line. Not in use
line : integration line between plates - name
'''
self.design.Clear_Field_Clac_Stack()
comp = fields.Vector_H
exp = comp.integrate_line_tangent(line)
I = exp.evaluate(phase=90)
self.design.Clear_Field_Clac_Stack()
return I
def calc_avg_current_J_surf_mag(self, variation, junc_rect, junc_line):
''' Peak value I_max of the projected mode current in junction J
The avg. is over the surface of the junction. I.e., spatial. '''
lv = self.get_lv(variation)
jl, uj = self.get_junc_len_dir(variation, junc_line)
uj = ConstantVecCalcObject(uj, self.setup)
calc = CalcObject([], self.setup)
#calc = calc.getQty("Jsurf").mag().integrate_surf(name = junc_rect)
calc = (((calc.getQty("Jsurf")).dot(uj)).complexmag()
).integrate_surf(name=junc_rect)
I = calc.evaluate(lv=lv) / jl # phase = 90
#self.design.Clear_Field_Clac_Stack()
return I
def calc_avg_current_J_surf_complex(self, variation, junc_rect, junc_line):
''' Complex average value of the projected mode current in junction J
The avg. is over the surface of the junction. I.e., spatial. '''
lv = self.get_lv(variation)
jl, uj = self.get_junc_len_dir(variation, junc_line)
uj = ConstantVecCalcObject(uj, self.setup)
calc_re = CalcObject([], self.setup)
calc_re = (((calc_re.getQty("Jsurf")).dot(uj)).real()
).integrate_surf(name=junc_rect)
calc_im = CalcObject([], self.setup)
calc_im = (((calc_im.getQty("Jsurf")).dot(uj)).imag()
).integrate_surf(name=junc_rect)
I = (calc_re.evaluate(lv=lv) + 1j * calc_im.evaluate(lv=lv)) / jl
return I
def calc_current_line_voltage(self, variation, junc_line_name, junc_L_Henries):
'''
Peak current I_max for prespecified mode calculating line voltage across junction.
Parameters:
------------------------------------------------
variation: variation number
junc_line_name: name of the HFSS line spanning the junction
junc_L_Henries: junction inductance in henries
TODO: Smooth?
'''
lv = self.get_lv(variation)
v_calc_real = CalcObject([], self.setup).getQty(
"E").real().integrate_line_tangent(name=junc_line_name)
v_calc_imag = CalcObject([], self.setup).getQty(
"E").imag().integrate_line_tangent(name=junc_line_name)
V = np.sqrt(v_calc_real.evaluate(lv=lv)**2 +
v_calc_imag.evaluate(lv=lv)**2)
freq = CalcObject(
[('EnterOutputVar', ('Freq', "Complex"))], self.setup).real().evaluate()
return V/(2*np.pi*freq*junc_L_Henries) # I=V/(wL)s
def calc_line_current(self, variation, junc_line_name):
lv = self.get_lv(variation)
calc = CalcObject([], self.setup)
calc = calc.getQty("H").imag().integrate_line_tangent(
name=junc_line_name)
#self.design.Clear_Field_Clac_Stack()
return calc.evaluate(lv=lv)
def calc_surf_impedance_losses(self, variation, surf):
''' Calculate the total losses of mode m in the surface impedance
boundary surf, using HFSS quantity SurfaceLossDensity.
It basically integrates the real part of the Poynting vector over
the surface. Therefore any simulated loss will be captured.
'''
lv = self.get_lv(variation)
calc = CalcObject([], self.setup)
calc = calc.getQty("SurfaceLossDensity").integrate_surf(name=surf)
P = calc.evaluate(lv=lv)
return P
def get_junc_len_dir(self, variation, junc_line):
'''
Return the length and direction of a junction defined by a line
Inputs: variation: simulation variation
junc_line: polyline object
Outputs: jl (float) junction length
uj (list of 3 floats) x,y,z coordinates of the unit vector
tangent to the junction line
'''
#
lv = self.get_lv(variation)
u = []
for coor in ['X', 'Y', 'Z']:
calc = CalcObject([], self.setup)
calc = calc.line_tangent_coor(junc_line, coor)
u.append(calc.evaluate(lv=lv))
jl = float(np.sqrt(u[0]**2+u[1]**2+u[2]**2))
uj = [float(u[0]/jl), float(u[1]/jl), float(u[2]/jl)]
return jl, uj
def get_Qseam(self, seam, mode, variation):
r'''
        Calculate the contribution to Q of a seam, by integrating the current in
the seam with finite conductance: set in the config file
ref: http://arxiv.org/pdf/1509.01119.pdf
'''
lv = self.get_lv(variation)
Qseam = OrderedDict()
print('Calculating Qseam_' + seam + ' for mode ' + str(mode) +
' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
# overestimating the loss by taking norm2 of j, rather than jperp**2
j_2_norm = self.fields.Vector_Jsurf.norm_2()
int_j_2 = j_2_norm.integrate_line(seam)
int_j_2_val = int_j_2.evaluate(lv=lv, phase=90)
yseam = int_j_2_val/self.U_H/self.omega
Qseam['Qseam_'+seam+'_' +
str(mode)] = config.dissipation.gseam/yseam
        print('Qseam_' + seam + '_' + str(mode) + str(' = ') +
              str(config.dissipation.gseam / yseam))
return Series(Qseam)
def get_Qseam_sweep(self, seam, mode, variation, variable, values, unit, pltresult=True):
# values = ['5mm','6mm','7mm']
# ref: http://arxiv.org/pdf/1509.01119.pdf
self.solutions.set_mode(mode+1, 0)
self.fields = self.setup.get_fields()
freqs_bare_dict, freqs_bare_vals = self.get_freqs_bare(variation)
self.omega = 2*np.pi*freqs_bare_vals[mode]
print(variation)
print(type(variation))
print(ureg(variation))
self.U_H = self.calc_energy_magnetic(variation)
lv = self.get_lv(variation)
Qseamsweep = []
print('Calculating Qseam_' + seam + ' for mode ' + str(mode) +
' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
for value in values:
self.design.set_variable(variable, str(value)+unit)
# overestimating the loss by taking norm2 of j, rather than jperp**2
j_2_norm = self.fields.Vector_Jsurf.norm_2()
int_j_2 = j_2_norm.integrate_line(seam)
int_j_2_val = int_j_2.evaluate(lv=lv, phase=90)
yseam = int_j_2_val/self.U_H/self.omega
Qseamsweep.append(config.dissipation.gseam/yseam)
# Qseamsweep['Qseam_sweep_'+seam+'_'+str(mode)] = gseam/yseam
#Cprint 'Qseam_' + seam + '_' + str(mode) + str(' = ') + str(gseam/yseam)
if pltresult:
_, ax = plt.subplots()
ax.plot(values, Qseamsweep)
ax.set_yscale('log')
ax.set_xlabel(variable+' ('+unit+')')
ax.set_ylabel('Q'+'_'+seam)
return Qseamsweep
def get_Qdielectric(self, dielectric, mode, variation):
Qdielectric = OrderedDict()
print('Calculating Qdielectric_' + dielectric + ' for mode ' +
str(mode) + ' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
U_dielectric = self.calc_energy_electric(variation, volume=dielectric)
p_dielectric = U_dielectric/self.U_E
#TODO: Update make p saved sep. and get Q for diff materials, indep. specify in pinfo
Qdielectric['Qdielectric_'+dielectric+'_' +
str(mode)] = 1/(p_dielectric*config.dissipation.tan_delta_sapp)
print('p_dielectric'+'_'+dielectric+'_' +
str(mode)+' = ' + str(p_dielectric))
return Series(Qdielectric)
def get_Qsurface_all(self, mode, variation):
'''
        Calculate the contribution to Q of a dielectric layer of dirt on all surfaces.
        Set the dirt thickness and loss tangent in the config file.
ref: http://arxiv.org/pdf/1509.01854.pdf
'''
lv = self.get_lv(variation)
Qsurf = OrderedDict()
print('Calculating Qsurface for mode ' + str(mode) +
' (' + str(mode) + '/' + str(self.nmodes-1) + ')')
# A = self.fields.Mag_E**2
# A = A.integrate_vol(name='AllObjects')
# U_surf = A.evaluate(lv=lv)
calcobject = CalcObject([], self.setup)
vecE = calcobject.getQty("E")
A = vecE
B = vecE.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_surf(name='AllObjects')
U_surf = A.evaluate(lv=lv)
U_surf *= config.dissipation.th*epsilon_0*config.dissipation.eps_r
p_surf = U_surf/self.U_E
Qsurf['Qsurf_'+str(mode)] = 1 / \
(p_surf*config.dissipation.tan_delta_surf)
print('p_surf'+'_'+str(mode)+' = ' + str(p_surf))
return Series(Qsurf)
def calc_Q_external(self, variation, freq_GHz, U_E):
'''
Calculate the coupling Q of mode m with each port p
Expected that you have specified the mode before calling this
'''
Qp = pd.Series({})
freq = freq_GHz * 1e9 # freq in Hz
for port_nm, port in self.pinfo.ports.items():
rects = port['rect'] if type(port['rect']) is list else [port['rect']]
lines = port['line'] if type(port['line']) is list else [port['line']]
Rs = port['R'] if type(port['R']) is list else [port['R']]
if len(rects) != len(lines) or len(rects) != len(Rs):
raise ValueError(f'Number of rect, lines and R are different in \
port {port_nm}.')
P = 0
for rect, line, R in zip(rects, lines, Rs):
if self.pinfo.options.method_calc_Q == 'Jsurf':
I_peak = self.calc_avg_current_J_surf_mag(variation,
rect, line)
P += 0.5 * R * I_peak**2
elif self.pinfo.options.method_calc_Q == 'SurfaceLossDensity':
P += self.calc_surf_impedance_losses(variation, rect)
else:
raise NotImplementedError('Other calculation methods\
(self.pinfo.options.method_calc_Q) are possible but not implemented here. ')
U_dissip = P / freq
p = U_dissip / (U_E/2) # U_E is 2x the peak electrical energy
kappa = p * freq
Q = 2 * np.pi * freq / kappa
Qp['Q_' + port_nm] = Q
return Qp
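    # Example (hedged sketch): calc_Q_external reads each port definition from
    # self.pinfo.ports, which must provide a rectangle, a line, and a resistance.
    # Object names below are hypothetical; lists of equal length are also accepted:
    #
    #   pinfo.ports['readout'] = {'rect': 'PortRect1',
    #                             'line': 'PortLine1',
    #                             'R'   : 50}   # Ohms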
def calc_drive_phase(self, variation):
'''
Calculate the phase along which mode m is driven by each port p.
The port should be at a distance much smaller than the wavelength
to the capacitance, inductance, whatsoever is coupled to the resonator.
'''
drive_phase = pd.Series({})
for port_nm, port in self.pinfo.ports.items():
rects = port['rect'] if type(port['rect']) is list else [port['rect']]
lines = port['line'] if type(port['line']) is list else [port['line']]
if len(rects) != len(lines):
raise ValueError(f'Number of rect and lines are different in \
port {port_nm}.')
I = 0
for rect, line in zip(rects, lines):
I += self.calc_avg_current_J_surf_complex(variation, rect, line)
I /= len(rects)
drive_phase['Phi_' + port_nm] = np.angle(I)
return drive_phase
def calc_p_junction(self, variation, U_H, U_E, Ljs):
'''
Expected that you have specified the mode before calling this, `self.set_mode(num)`
        Expected to precalc U_H and U_E for the mode; will return pandas Series objects
junc_rect = ['junc_rect1', 'junc_rect2'] name of junc rectangles to integrate H over
junc_len = [0.0001] specify in SI units; i.e., meters
LJs = [8e-09, 8e-09] SI units
calc_sign = ['junc_line1', 'junc_line2']
This function assumes there are no lumped capacitors in model.
        Potential errors: If you don't have a line or rect by the right name, you will probably
        get an error of the type:
com_error: (-2147352567, 'Exception occurred.', (0, None, None, None, 0, -2147024365), None)
'''
Pj = pd.Series({})
Sj = pd.Series({})
for junc_nm, junc in self.pinfo.junctions.items():
logger.debug(f'Calculating participation for {(junc_nm, junc)}')
# Get peak current though junction I_peak
            if self.pinfo.options.method_calc_P_mj == 'J_surf_mag':
I_peak = self.calc_avg_current_J_surf_mag(
variation, junc['rect'], junc['line'])
            elif self.pinfo.options.method_calc_P_mj == 'line_voltage':
I_peak = self.calc_current_line_voltage(
variation, junc['line'], Ljs[junc_nm])
else:
raise NotImplementedError('Other calculation methods\
(self.pinfo.options.method_calc_P_mj) are possible but not implemented here. ')
Pj['p_' + junc_nm] = Ljs[junc_nm] * I_peak**2 / U_E
            # divide by U_E: participation normed to be between 0 and 1 by the
            # total capacitive energy, which should equal the total inductive energy
# Sign bit
Sj['s_' + junc_nm] = + \
1 if (self.calc_line_current(
variation, junc['line'])) > 0 else -1
if self.verbose:
print('\t{:<15} {:>8.6g} {:>5s}'.format(
junc_nm,
Pj['p_' + junc_nm],
'+' if Sj['s_' + junc_nm] > 0 else '-'))
return Pj, Sj
def get_previously_analyzed(self, filename=None):
# TODO: load from data_file
        # Return previously analyzed variations from the load filename
return []
def do_EPR_analysis(self,
variations:list=None,
modes=None):
"""
Main analysis routine
Load results with pyEPR_Analysis
..code-block python
            pyEPR_Analysis(self.data_filename, variations=variations)
Optional Parameters:
------------------------
variations : list | None
Example list of variations is ['0', '1']
A variation is a combination of project/design variables in an optimetric sweep
modes : list | None
Modes to analyze
for example modes = [0, 2, 3]
HFSS Notes:
------------------------
Assumptions:
Low dissipation (high-Q).
                Right now, we assume that there are no lumped capacitors, to simplify calculations.
Not required.
We assume that there are only lumped inductors, so that U_tot = U_E+U_H+U_L
and U_C =0, so that U_tot = 2*U_E;
"""
# Track the total timing
self._run_time = time.strftime('%Y%m%d_%H%M%S', time.localtime())
# Update the latest hfss variation information
self.update_variation_information()
# Define local variables
variations = variations or self.variations
modes = modes or range(self.nmodes)
previously_analyzed = self.get_previously_analyzed()
project_info_save = self.pinfo.save() # dict
### Main loop - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        #TODO: Move the inside of the loop to a function called self.analyze_variation
for ii, variation in enumerate(variations):
print(f'\nVariation {variation} [{ii+1}/{len(variations)}]')
            # Previously analyzed and we shouldn't overwrite?
if self.append_analysis and variation in previously_analyzed:
print_NoNewLine(' previously analyzed ...\n')
continue
# If not, clear the results
self.results[variation] = Dict()
self.lv = self.get_lv(variation)
time.sleep(0.4)
            if not self.has_fields():
logger.error(f" Error: HFSS does not have field solution for mode={ii}.\
Skipping this mode in the analysis")
continue
freqs_bare_GHz, Qs_bare = self.get_freqs_bare_pd(variation)
self.hfss_variables[variation] = pd.Series(
self.get_variables(variation=variation))
Ljs = pd.Series({})
for junc_name, val in self.junctions.items(): # junction nickname
Ljs[junc_name] = ureg.Quantity(
self.hfss_variables[variation]['_'+val['Lj_variable']]).to_base_units(\
).magnitude
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# This is crummy now. Maybe use xarray.
Om = OrderedDict() # Matrix of angular frequency (of analyzed modes)
Pm = OrderedDict() # Participation P matrix
Sm = OrderedDict() # Sign S matrix
Qm_coupling = OrderedDict() # Quality factor matrix
Phi_d = OrderedDict() # Phase along which a port drives a mode
SOL = OrderedDict() # Other results
for mode in modes: # integer of mode number [0,1,2,3,..]
# Load fields for mode
self.set_mode(mode)
### Get HFSS solved frequencies
_Om = pd.Series({})
temp_freq = freqs_bare_GHz[mode]
_Om['freq_GHz'] = temp_freq # freq
Om[mode] = _Om
print(f' Mode {mode} at {"%.2f" % temp_freq} GHz [{mode+1}/{self.nmodes}]')
### EPR Hamiltonian calculations
# Calculation global energies and report
# Magnetic
print(' Calculating ℰ_magnetic', end=',')
try:
self.U_H = self.calc_energy_magnetic(variation)
except Exception as e:
tb = sys.exc_info()[2]
print("\n\nError:\n", e)
raise(Exception(' Did you save the field solutions?\n\
Failed during calculation of the total magnetic energy.\
This is the first calculation step, and is indicative that there are \
no field solutions saved. ').with_traceback(tb))
# Electric
print('ℰ_electric')
self.U_E = self.calc_energy_electric(variation)
sol = Series({'U_H': self.U_H, 'U_E': self.U_E})
# Fraction
print(f""" {'(ℰ_E-ℰ_H)/ℰ_E':>15s} {'ℰ_E':>9s} {'ℰ_H':>9s}
{100*(self.U_E - self.U_H)/self.U_E:>15.1f}% {self.U_E:>9.4g} {self.U_H:>9.4g}\n""")
# Calcualte EPR for each of the junctions
print(f' Calculating junction EPR [method={self.pinfo.options.method_calc_P_mj}]')
print(f"\t{'junction':<15s} EPR p_{mode}j sign s_{mode}j")
Pm[mode], Sm[mode] = self.calc_p_junction(variation, self.U_H, self.U_E, Ljs)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# EPR Dissipative calculations -- should be a function block below
# TODO: this should really be passed as argument to the functions rather than a
                # property of the class I would say
self.omega = 2*np.pi*freqs_bare_GHz[mode]
Qm_coupling[mode] = self.calc_Q_external(variation,
freqs_bare_GHz[mode],
self.U_E)
Phi_d[mode] = self.calc_drive_phase(variation)
# get seam Q
if self.pinfo.dissipative.seams:
for seam in self.pinfo.dissipative.seams:
sol = sol.append(self.get_Qseam(seam, mode, variation))
# get Q dielectric
if self.pinfo.dissipative.dielectrics_bulk:
for dielectric in self.pinfo.dissipative.dielectrics_bulk:
sol = sol.append(self.get_Qdielectric(
dielectric, mode, variation))
# get Q surface
                if self.pinfo.dissipative.resistive_surfaces:
                    if self.pinfo.dissipative.resistive_surfaces == 'all':
sol = sol.append(
self.get_Qsurface_all(mode, variation))
else:
raise NotImplementedError(
"Join the team, by helping contribute this piece of code.")
                if self.pinfo.dissipative.dielectric_surfaces is not None:
raise NotImplementedError(
"Join the team, by helping contribute this piece of code.")
SOL[mode] = sol
# Save
self._update_results(variation, Om, Pm, Sm, Qm_coupling, Phi_d,
SOL, freqs_bare_GHz, Qs_bare, Ljs,
self.hfss_variables[variation])
self.save()
print('\nANALYSIS DONE. Data saved to:\n\n' + str(self.data_filename)+'\n\n')
return self.data_filename, variations
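    # Typical call sequence (illustrative only, mirrors the docstring above):
    #
    #   epr_hfss = pyEPR_HFSSAnalysis(pinfo)
    #   data_filename, variations = epr_hfss.do_EPR_analysis()
    #   epr = pyEPR_Analysis(data_filename, variations=variations)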
def _update_results(self, variation:str, Om, Pm, Sm, Qm_coupling, Phi_d,
sols, freqs_bare_GHz, Qs_bare, Ljs, hfss_variables):
'''
Save variation
'''
self.results[variation]['Om'] = pd.DataFrame(Om)
self.results[variation]['Pm'] = pd.DataFrame(Pm).transpose() # raw, not normalized
self.results[variation]['Sm'] = pd.DataFrame(Sm).transpose()
self.results[variation]['sols'] = pd.DataFrame(sols).transpose()
self.results[variation]['Qm_coupling'] = pd.DataFrame(Qm_coupling).transpose()
self.results[variation]['Drive_phase'] = pd.DataFrame(Phi_d).transpose()
self.results[variation]['Ljs'] = Ljs
self.results[variation]['Qs'] = Qs_bare
self.results[variation]['freqs_hfss_GHz'] = freqs_bare_GHz
self.results[variation]['hfss_variables'] = hfss_variables
self.results[variation]['mesh'] = None
self.results[variation]['convergence'] = None
self.results[variation]['convergence_f_pass'] = None
if self.options.save_mesh_stats:
self.results[variation]['mesh'] = self.get_mesh_statistics(variation) # dataframe
self.results[variation]['convergence'] = self.get_convergence(variation)
self.results[variation]['convergence_f_pass'] = self.hfss_report_f_convergence(
variation, save_csv=False) # dataframe
@staticmethod
def results_variations_on_inside(results:dict):
"""
Switches the order on result of variations. Reverse dict.
"""
keys = set()
variations = list(results.keys())
# Get all keys
for variation in variations:
result = results[variation]
keys.update(result.keys())
new_res = dict()
for key in keys:
new_res[key] = {variation: results[variation].get(key, None) \
for variation in variations}
            # Convert to pandas DataFrame if all are Series
            if all(isinstance(new_res[key][variation], pd.Series) for variation in variations):
                #print(key)  # Convert these to DataFrame
                new_res[key] = pd.DataFrame(new_res[key])  # variations will become columns
                new_res[key].columns.name = 'variation'
# sort_df_col : maybe sort
return new_res # dict of keys now
def save(self, project_info:dict=None):
"""Save results to self.data_filename
Keyword Arguments:
project_info {dict} -- [description] (default: {None})
"""
if project_info is None:
project_info= self.pinfo.save()
to_save = dict(
project_info = project_info,
results = self.results,
)
with open(str(self.data_filename), 'wb') as handle:
pickle.dump(to_save, handle)#, protocol=pickle.HIGHEST_PROTOCOL)
def load(self, filepath=None):
"""Utility function to load reuslts file
Keyword Arguments:
filepath {[type]} -- [description] (default: {None})
"""
filepath = filepath or self.data_filename
with open(str(filepath), 'rb') as handle:
loaded = pickle.load(handle)
return loaded
def get_mesh_statistics(self, variation='0'):
'''
Input:
variation='0' ,'1', ...
Returns dataframe:
```
Name Num Tets Min edge length Max edge length RMS edge length Min tet vol Max tet vol Mean tet vol Std Devn (vol)
0 Region 909451 0.000243 0.860488 0.037048 6.006260e-13 0.037352 0.000029 6.268190e-04
1 substrate 1490356 0.000270 0.893770 0.023639 1.160090e-12 0.031253 0.000007 2.309920e-04
```
'''
variation = self.listvariations[ureg(variation)]
return self.setup.get_mesh_stats(variation)
def get_convergence(self, variation='0'):
'''
Input:
variation='0' ,'1', ...
Returns dataframe:
```
Solved Elements Max Delta Freq. % Pass Number
1 128955 NaN
2 167607 11.745000
3 192746 3.208600
4 199244 1.524000
```
'''
variation = self.listvariations[ureg(variation)]
df, _ = self.setup.get_convergence(variation)
return df
def get_convergence_vs_pass(self, variation='0'):
'''
        Returns the convergence vs pass number of the eigenmode freqs.
Makes a plot in HFSS that return a pandas dataframe:
```
re(Mode(1)) [g] re(Mode(2)) [g] re(Mode(3)) [g]
Pass []
1 4.643101 4.944204 5.586289
2 5.114490 5.505828 6.242423
3 5.278594 5.604426 6.296777
```
'''
return self.hfss_report_f_convergence(variation)
def set_mode(self, mode_num, phase=0):
'''
Set source excitations should be used for fields post processing.
Counting modes from 0 onward
'''
assert self.setup, "ERROR: There is no 'setup' connected. \N{face with medical mask}"
if mode_num < 0:
logger.error('Too small a mode number')
self.solutions.set_mode(mode_num + 1, phase)
        if not self.has_fields():
logger.warning(f" Error: HFSS does not have field solution for mode={mode_num}.\
Skipping this mode in the analysis \N{face with medical mask}")
self.fields = self.setup.get_fields()
def get_variation_nominal(self):
return self.design.get_nominal_variation()
def get_num_variation_nominal(self):
try:
return str(self.listvariations.index(self.nominalvariation))
except:
return '0'
def get_variations_all(self):
self.update_variation_information()
return self.listvariations
def update_variation_information(self):
        '''
Updates all information about the variations.
nmodes, listvariations, nominalvariation, nvariations
variations = ['0','1','2'] or [] for empty
'''
if self.setup:
self.listvariations = self.design._solutions.ListVariations(\
str(self.setup.solution_name))
self.nominalvariation = self.design.get_nominal_variation()
self.nvariations = np.size(self.listvariations)
self.variations = [str(i) for i in range(self.nvariations)]
self.num_variation_nominal = self.get_num_variation_nominal()
if self.design.solution_type == 'Eigenmode':
self.nmodes = int(self.setup.n_modes)
else:
self.nmodes = 0
def has_fields(self, variation=None):
'''
Determine if fields exist for a particular solution.
variation : str | None
If None, gets the nominal variation
'''
if self.solutions:
return self.solutions.has_fields(variation)
else:
            return False
def hfss_report_f_convergence(self, variation='0', save_csv=True):
'''
Create a report inside HFSS to plot the converge of freq and style it.
Saves report to csv file.
        Returns the convergence vs pass number of the eigenmode freqs.
Returns a pandas dataframe:
```
re(Mode(1)) [g] re(Mode(2)) [g] re(Mode(3)) [g]
Pass []
1 4.643101 4.944204 5.586289
2 5.114490 5.505828 6.242423
3 5.278594 5.604426 6.296777
```
'''
#TODO: Move to class for reporter ?
if not self.setup:
logger.error('NO SETUP PRESENT - hfss_report_f_convergence.')
return None
if not self.design.solution_type == 'Eigenmode':
return None
oDesign = self.design
variation = self.get_lv(variation)
report = oDesign._reporter
# Create report
ycomp = [f"re(Mode({i}))" for i in range(1,1+self.nmodes)]
params = ["Pass:=", ["All"]]+variation
report_name = "Freq. vs. pass"
if report_name in report.GetAllReportNames():
report.DeleteReports([report_name])
self.solutions.create_report(report_name, "Pass", ycomp, params, pass_name='AdaptivePass')
# Properties of lines
curves = [f"{report_name}:re(Mode({i})):Curve1" for i in range(1,1+self.nmodes)]
set_property(report, 'Attributes', curves, 'Line Width', 3)
set_property(report, 'Scaling', f"{report_name}:AxisY1", 'Auto Units', False)
set_property(report, 'Scaling', f"{report_name}:AxisY1", 'Units', 'g')
set_property(report, 'Legend', f"{report_name}:Legend", 'Show Solution Name', False)
if save_csv: # Save
try:
path = Path(self.data_dir)/'hfss_eig_f_convergence.csv'
report.ExportToFile(report_name,path)
logger.info(f'Saved convergences to {path}')
return pd.read_csv(path, index_col= 0)
except Exception as e:
logger.error(f"Error could not save and export hfss plot to {path}.\
Is the plot made in HFSS with the correct name.\
Check the HFSS error window. \t Error = {e}")
return None
def hfss_report_full_convergence(self, fig=None, _display=True):
"""Plot a full report of teh convergences of an eigenmode analysis for a
a given variation. Makes a plot inside hfss too.
Keyword Arguments:
fig {matpllitb figure} -- Optional figure (default: {None})
_display {bool} -- Force display or not. (default: {True})
Returns:
[type] -- [description]
"""
if fig is None:
fig = plt.figure(figsize=(11,3.))
for variation in self.variations:
fig.clf()
#Grid spec and axes; height_ratios=[4, 1], wspace=0.5
gs = mpl.gridspec.GridSpec(1, 3, width_ratios=[1.2, 1.5, 1])
axs = [fig.add_subplot(gs[i]) for i in range(3)]
logger.info(f'Creating report for variation {variation}')
convergence_t = self.get_convergence(variation=variation)
convergence_f = self.hfss_report_f_convergence(variation=variation)
ax0t = axs[1].twinx()
plot_convergence_f_vspass(axs[0], convergence_f)
plot_convergence_max_df(axs[1], convergence_t.iloc[:,1])
plot_convergence_solved_elem(ax0t, convergence_t.iloc[:,0])
plot_convergence_maxdf_vs_sol(axs[2], convergence_t.iloc[:,1],
convergence_t.iloc[:,0])
fig.tight_layout(w_pad=0.1)#pad=0.0, w_pad=0.1, h_pad=1.0)
if _display:
from IPython.display import display
display(fig)
return fig
#%%==============================================================================
### ANALYSIS FUNCTIONS
#==============================================================================
def pyEPR_ND(freqs, Ljs, ϕzpf,
cos_trunc=8,
fock_trunc=9,
use_1st_order=False,
return_H=False):
'''
    Numerical diagonalization for pyEPR.
    :param freqs: (GHz, not radians) Linearized model, H_lin, normal mode frequencies, length M
    :param Ljs: (Henries) Junction linearized inductances, length J
    :param ϕzpf: (reduced) Reduced zero-point fluctuation of the junction fluxes for each mode
across each junction, shape MxJ
:return: Hamiltonian mode freq and dispersive shifts. Shifts are in MHz.
Shifts have flipped sign so that down shift is positive.
'''
freqs, Ljs, ϕzpf = map(np.array, (freqs, Ljs, ϕzpf))
assert(all(freqs < 1E6)), "Please input the frequencies in GHz. \N{nauseated face}"
assert(all(Ljs < 1E-3)), "Please input the inductances in Henries. \N{nauseated face}"
    Hs = bbq_hmt(freqs * 1E9, Ljs.astype(float), fluxQ*ϕzpf,
cos_trunc, fock_trunc, individual=use_1st_order)
f_ND, χ_ND, _, _ = make_dispersive(
Hs, fock_trunc, ϕzpf, freqs, use_1st_order=use_1st_order)
χ_ND = -1*χ_ND * 1E-6 # convert to MHz, and flip sign so that down shift is positive
return (f_ND, χ_ND, Hs) if return_H else (f_ND, χ_ND)
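# Example (illustrative sketch; all numbers are placeholders): numerical
# diagonalization of a two-mode, single-junction system.
#
#   f_ND, chi_ND = pyEPR_ND(freqs=[4.5, 7.2],         # GHz
#                           Ljs=[10e-9],              # Henries
#                           ϕzpf=[[0.05], [0.002]],   # M x J reduced ZPFs
#                           cos_trunc=8, fock_trunc=9)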
#==============================================================================
# ANALYSIS BBQ
#==============================================================================
class Results_Hamiltonian(OrderedDict):
'''
Class to store and process results from the analysis of $H_nl$.
'''
file_name_extra = ' Results_Hamiltonian.npz'
def __init__(self, dict_file=None, data_dir=None):
""" input:
dict file - 1. ethier None to create an empty results hamilitoninan as
as was done in the original code
2. or a string with the name of the file where the file of the
previously saved Results_Hamiltonian instatnce we wish
to load
3. or an existing instance of a dict class which will be
upgraded to the Results_Hamiltonian class
data_dir - the directory in which the file is to be saved or loaded
from, defults to the config.root_dir
"""
super().__init__()
self.sort_index = True # for retrieval
if data_dir is None:
data_dir = Path(config.root_dir) / 'temp' / \
time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
data_path = Path(data_dir).resolve()
file_name = data_path.stem
directory = data_path.parents[0]
if not directory.is_dir():
directory.mkdir(parents=True, exist_ok=True)
if dict_file is None:
self.file_name = str(directory/(str(file_name)+self.file_name_extra))
#logger.info(f'Filename hamiltonian params to {self.file_name }')
elif isinstance(dict_file, str):
try:
self.file_name = str(directory/dict_file)
self.load_from_npz()
except:
self.file_name = dict_file
self.load_from_npz()
elif isinstance(dict_file, dict):
# Depreciated
self.inject_dic(dict_file)
self.file_name = str(data_dir)+self.file_name_extra
else:
raise ValueError('type dict_file is of type {}'.format(type(dict_file)))
#load file
#TODO: make this savable and loadable
def save_to_npz(self, filename=None):
if filename is None:
filename = self.file_name
np.savez(filename, Res_Hamil=dict(self))
return filename
def load_from_npz(self, filename=None):
if filename is None:
filename = self.file_name
self.inject_dic(extract_dic(file_name=filename)[0])
return filename
def inject_dic(self, add_dic):
Init_number_of_keys = len(self.keys())
for key, val in add_dic.items():
###TODO remove all copies of same data
# if key in self.keys():
            #raise ValueError('trying to overwrite an existing variation')
self[str(int(key)+Init_number_of_keys)] = val
return 1
@staticmethod
def do_sort_index(z:pd.DataFrame):
"""Overwrite to sort by custom function
Arguments:
z {pd.DataFrame} -- Input
Returns:
            Sorted DataFrame
"""
if isinstance(z, pd.DataFrame):
return z.sort_index(axis=1)
else:
return z
def get_vs_variations(self, quantity: str, variations: list = None, vs='variation'):
"""
Arguments:
quantity {[type]} -- [description]
Keyword Arguments:
variations {list of strings} -- Variations (default: {None} -- means all)
vs {str} -- Swept against (default: {'variation'})
Returns:
[type] -- [description]
"""
res = OrderedDict()
variations = variations or self.keys()
for key in variations: # variation
            if vs == 'variation':
res[key] = self[key][quantity]
else:
res[str(ureg.Quantity(self[key]['hfss_variables']['_'+vs]).magnitude
)] = self[key][quantity]
return res
def get_frequencies_HFSS(self, variations: list = None, vs='variation'):
z = sort_df_col(pd.DataFrame(self.get_vs_variations('f_0', variations=variations, vs=vs)))
if self.sort_index:
z = self.do_sort_index(z)
z.index.name = 'eigenmode'
z.columns.name = vs
return z
def get_frequencies_O1(self, variations: list = None, vs='variation'):
z = sort_df_col(pd.DataFrame(self.get_vs_variations('f_1', variations=variations, vs=vs)))
if self.sort_index:
z = self.do_sort_index(z)
z.index.name = 'eigenmode'
z.columns.name = vs
return z
def get_frequencies_ND(self, variations: list = None, vs='variation'):
z = sort_df_col(pd.DataFrame(self.get_vs_variations('f_ND', variations=variations, vs=vs)))
if self.sort_index:
z = self.do_sort_index(z)
z.index.name = 'eigenmode'
z.columns.name = vs
return z
def get_chi_O1(self, variations: list = None, vs='variation'):
z = self.get_vs_variations('chi_O1', variations=variations, vs=vs)
#if self.sort_index:
# z = self.do_sort_index(z)
return z
def get_chi_ND(self, variations: list = None, vs='variation'):
z = self.get_vs_variations('chi_ND', variations=variations, vs=vs)
#if self.sort_index:
# z = self.do_sort_index(z)
return z
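    # Example (hedged sketch): once variations have been analyzed (e.g. via
    # pyEPR_Analysis.analyze_all_variations), the stored quantities can be pulled
    # back out per variation:
    #
    #   chis = epr.results.get_chi_O1()          # dict keyed by variation
    #   f1s  = epr.results.get_frequencies_O1()  # DataFrame: modes x variations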
class pyEPR_Analysis(object):
'''
    Defines an analysis object which loads and plots data from a pyEPR data file
This data is obtained using pyEPR_HFSSAnalysis
'''
def __init__(self, data_filename,
variations: list = None,
do_print_info=True,
Res_hamil_filename=None):
self.data_filename = data_filename
self.results = Results_Hamiltonian(dict_file=Res_hamil_filename,
data_dir=data_filename)
with open(str(data_filename), 'rb') as handle:
# Contain everything: project_info and results
self.data = Dict(pickle.load(handle))
# Reverse from variations on outside to on inside
results = pyEPR_HFSSAnalysis.results_variations_on_inside(self.data.results)
        # Convenience attributes
self.variations = variations or list(self.data.results.keys())
self.hfss_variables = results['hfss_variables']
self.freqs_hfss = results['freqs_hfss_GHz']
self.Qs = results['Qs']
self.Qm_coupling = results['Qm_coupling']
self.Phi_d = results['Drive_phase']
self.Ljs = results['Ljs'] # DataFrame
self.OM = results['Om'] # dict of dataframes
self.PM = results['Pm'] # participation matrices
self.SM = results['Sm'] # sign matrices
self.sols = results['sols']
self.mesh_stats = results['mesh']
self.convergence = results['convergence']
self.convergence_f_pass = results['convergence_f_pass']
self.nmodes = self.sols[self.variations[0]].shape[0]
self._renorm_pj = config.epr.renorm_pj
# Unique variation params -- make a get function
dum = DataFrame_col_diff(self.hfss_variables)
self.hfss_vars_diff_idx = dum if not (dum.any() == False) else []
try:
self.Num_hfss_vars_diff_idx =len(self.hfss_vars_diff_idx[self.hfss_vars_diff_idx==True])
except:
e = sys.exc_info()[0]
logger.warning( "<p>Error: %s</p>" % e )
self.Num_hfss_vars_diff_idx= 0
if do_print_info:
self.print_info()
@property
def project_info(self):
return self.data.project_info
def print_info(self):
print("\t Differences in variations:")
if len(self.hfss_vars_diff_idx) > 0:
print(self.hfss_variables[self.hfss_vars_diff_idx])
print('\n')
def get_variable_vs(self, swpvar, lv=None):
""" lv is list of variations (example ['0', '1']), if None it takes all variations
            swpvar is the variable by which to organize
            return:
            ordered dictionary whose key is the variation number and whose item is
            the magnitude of swpvar
        """
"""
ret = OrderedDict()
if lv is None:
for key, varz in self.hfss_variables.items():
ret[key] = ureg.Quantity(varz['_'+swpvar]).magnitude
else:
try:
for key in lv:
ret[key] = ureg.Quantity(self.hfss_variables[key]['_'+swpvar]).magnitude
except:
print(' No such variation as ' + key)
return ret
def get_variable_value(self, swpvar, lv=None):
var = self.get_variable_vs(swpvar, lv=lv)
return [var[key] for key in var.keys()]
def get_variations_of_variable_value(self, swpvar, value, lv=None):
"""A function to return all the variations in which one of the variables
has a specific value lv is list of variations (example ['0', '1']),
if None it takes all variations
swpvar is a string and the name of the variable we wish to filter
value is the value of swapvr in which we are intrested
returns lv - a list of the variations for which swavr==value
"""
if lv is None:
lv = self.variations
ret = self.get_variable_vs(swpvar, lv=lv)
lv = np.array(list(ret.keys()))[np.array(list(ret.values())) == value]
#lv = lv_temp if not len(lv_temp) else lv
if not (len(lv)):
raise ValueError('No variations have the variable-' + swpvar +\
'= {}'.format(value))
return lv
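    # Example (hedged; the variable name 'Lj1' is hypothetical): keep only the
    # variations in which the swept HFSS variable 'Lj1' has magnitude 13:
    #
    #   lv = epr.get_variations_of_variable_value('Lj1', 13)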
def get_variation_of_multiple_variables_value(self, Var_dic, lv=None):
"""
SEE get_variations_of_variable_value
A function to return all the variations in which one of the variables has a specific value
lv is list of variations (example ['0', '1']), if None it takes all variations
        Var_dic is a dict with the name of the variable as key and the value to filter as item
"""
if lv is None:
lv = self.variations
var_str = None
for key, var in Var_dic.items():
lv = self.get_variations_of_variable_value(key, var, lv)
if var_str is None:
var_str = key + '= {}'.format(var)
else:
var_str = var_str + ' & ' + key + '= {}'.format(var)
return lv, var_str
def get_convergences_Max_Tets(self):
''' Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ]) '''
ret = OrderedDict()
for key, df in self.convergence.items():
ret[key] = df['Solved Elements'].iloc[-1]
return ret
def get_convergences_Tets_vs_pass(self):
''' Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ]) '''
ret = OrderedDict()
for key, df in self.convergence.items():
s = df['Solved Elements']
#s.index = df['Pass Number']
ret[key] = s
return ret
def get_convergences_MaxDeltaFreq_vs_pass(self):
''' Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ]) '''
ret = OrderedDict()
for key, df in self.convergence.items():
s = df['Max Delta Freq. %']
#s.index = df['Pass Number']
ret[key] = s
return ret
def get_mesh_tot(self):
ret = OrderedDict()
for key, m in self.mesh_stats.items():
ret[key] = m['Num Tets '].sum()
return ret
'''
def get_solution_column(self, col_name, swp_var, sort = True):
# sort by variation -- must be numeri
Qs, swp = [], []
for key, sol in self.sols.items():
Qs += [ sol[col_name] ]
varz = self.hfss_variables[key]
swp += [ ureg.Quantity(varz['_'+swp_var]).magnitude ]
Qs = DataFrame(Qs, index = swp)
return Qs if not sort else Qs.sort_index()
'''
def get_Qs_vs_swp(self, swp_var, sort=True):
raise NotImplementedError()
return self.get_solution_column('modeQ', swp_var, sort)
def get_Fs_vs_swp(self, swp_var, sort=True):
raise NotImplementedError()
''' this returns the linear frequencies that HFSS gives'''
return self.get_solution_column('freq', swp_var, sort)
def get_Ejs(self, variation):
''' EJs in GHz '''
Ljs = self.Ljs[variation]
Ejs = fluxQ**2/Ljs/Planck*10**-9
return Ejs
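    # Quick check (hedged): with fluxQ the reduced flux quantum (hbar/2e), a junction
    # with Lj = 10 nH gives E_J/h = fluxQ**2 / Lj / Planck ≈ 16.3 GHz, which is the
    # kind of value this method returns.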
def analyze_all_variations(self,
# None returns all_variations otherwis this is a
# list with number as strings ['0', '1']
variations=None,
Analyze_previous=False,
# set to true if you wish to
# overwrite previous analysis
**kwargs):
'''
See analyze_variation
'''
result = OrderedDict()
if variations is None:
variations = self.variations
for variation in variations:
if (not Analyze_previous) and (variation in self.results.keys()):
result[variation] = self.results[variation]
else:
result[variation] = self.analyze_variation(variation, **kwargs)
return result
def get_Pmj(self, variation, _renorm_pj=None, print_=False):
'''
Get normalized Pmj Matrix
Return DataFrame object for PJ
'''
if _renorm_pj is None:
_renorm_pj = self._renorm_pj
Pm = self.PM[variation].copy() # EPR matrix from Jsurf avg, DataFrame
if self._renorm_pj: # Renormalize
s = self.sols[variation]
# sum of participations as calculated by global UH and UE
Pm_glb_sum = (s['U_E'] - s['U_H'])/s['U_E']
Pm_norm = Pm_glb_sum/Pm.sum(axis=1)
# Should we still do this when Pm_glb_sum is very small
if print_:
print("Pm_norm = %s " % str(Pm_norm))
Pm = Pm.mul(Pm_norm, axis=0)
else:
Pm_norm = 1
if print_:
print('NO renorm!')
if np.any(Pm < 0.0):
print_color(" ! Warning: Some p_mj was found <= 0. This is probably a numerical error, or a super low-Q mode. We will take the abs value. Otherwise, rerun with more precision, inspect, and do due dilligence.)")
print(Pm, '\n')
Pm = np.abs(Pm)
return {'PJ': Pm, 'Pm_norm': Pm_norm}
def get_matrices(self, variation, _renorm_pj=None, print_=False):
'''
All as matrices
        :PJ: Participation matrix, p_mj
:SJ: Sign matrix, s_mj
:Om: Omega_mm matrix (in GHz) (\hbar = 1) Not radians.
:EJ: E_jj matrix of Josephson energies (in same units as hbar omega matrix)
:PHI_zpf: ZPFs in units of \phi_0 reduced flux quantum
Return all as *np.array*
PM, SIGN, Om, EJ, Phi_ZPF
'''
        #TODO: supersede by Convert.ZPF_from_EPR
PJ = self.get_Pmj(variation, _renorm_pj=_renorm_pj, print_=print_)
PJ = np.array(PJ['PJ'])
# Sign bits
SJ = np.array(self.SM[variation]) # DataFrame
# Frequencies of HFSS linear modes.
# Input in dataframe but of one line. Output nd array
Om = np.diagflat(self.OM[variation].values) # GHz
# Junction energies
EJ = np.diagflat(self.get_Ejs(variation).values) # GHz
logger.debug(PJ, SJ, Om, EJ)
PHI_zpf = CalcsBasic.epr_to_zpf(PJ, SJ, Om, EJ)
return PJ, SJ, Om, EJ, PHI_zpf # All as np.array
def analyze_variation(self,
variation,
cos_trunc = None,
fock_trunc = None,
print_result = True,
junctions = None,
modes = None):
        ##TODO: avoid analyzing a previously analyzed variation
'''
Can also print results neatly.
Args:
junctions: list or slice of junctions to include in the analysis. None defaults to analysing all junctions
modes: list or slice of modes to include in the analysis. None defaults to analysing all modes
Returns
----------------------------
        f_0 [MHz]    : Eigenmode frequencies computed by HFSS; i.e., linear freq returned in GHz
        f_1 [MHz]    : Dressed mode frequencies (by the non-linearity; e.g., Lamb shift, etc.). If numerical diagonalization is run, then we return the numerically diagonalized frequencies; otherwise, use 1st order perturbation theory on the 4th order expansion of the cosine.
        f_ND [MHz]   : Numerically diagonalized mode frequencies
        chi_O1 [MHz] : Analytic expression for the chis based on a cos trunc to 4th order, and using 1st order perturbation theory. Diag is anharmonicity, off diag is full cross-Kerr.
        chi_ND [MHz] : Numerically diagonalized chi matrix. Diag is anharmonicity, off diag is full cross-Kerr.
'''
junctions = (junctions,) if type(junctions) == int else junctions # ensuring proper matrix dimensionality when slicing
modes = (modes,) if type(modes) == int else modes # ensuring proper matrix dimensionality when slicing
if (fock_trunc == None) or (cos_trunc == None):
fock_trunc = cos_trunc = None
if print_result:
print('\n', '. '*40)
print('Variation %s\n' % variation)
else:
print('%s, ' % variation, end='')
# Get matrices
PJ, SJ, Om, EJ, PHI_zpf = self.get_matrices(variation)
freqs_hfss = self.freqs_hfss[variation].values
Ljs = self.Ljs[variation].values
# reduce matrices to only include certain modes/junctions
if junctions is not None:
Ljs = Ljs[junctions,]
PJ = PJ[:,junctions]
SJ = SJ[:,junctions]
EJ = EJ[:,junctions][junctions,:]
PHI_zpf = PHI_zpf[:,junctions]
if modes is not None:
freqs_hfss = freqs_hfss[modes,]
PJ = PJ[modes,:]
SJ = SJ[modes,:]
Om = Om[modes,:][:,modes]
PHI_zpf = PHI_zpf[modes,:]
# Analytic 4-th order
CHI_O1 = 0.25* Om @ PJ @ inv(EJ) @ PJ.T @ Om * 1000. # MHz
f1s = np.diag(Om) - 0.5*np.ndarray.flatten( np.array(CHI_O1.sum(1))) / 1000. # 1st order PT expect freq to be dressed down by alpha
CHI_O1 = divide_diagonal_by_2(CHI_O1) # Make the diagonals alpha
# Numerical diag
if cos_trunc is not None:
f1_ND, CHI_ND = pyEPR_ND(freqs_hfss,
Ljs,
PHI_zpf,
cos_trunc = cos_trunc,
fock_trunc = fock_trunc)
else:
f1_ND, CHI_ND = None, None
result = OrderedDict()
result['f_0'] = self.freqs_hfss[variation]*1E3 # MHz - obtained directly from HFSS
result['f_1'] = pd.Series(f1s)*1E3 # MHz
result['f_ND'] = pd.Series(f1_ND)*1E-6 # MHz
result['chi_O1'] = pd.DataFrame(CHI_O1)
result['chi_ND'] = pd.DataFrame(CHI_ND) # why dataframe?
result['ZPF'] = PHI_zpf
result['Pm_normed'] = PJ
result['_Pm_norm'] = self.get_Pmj(variation, _renorm_pj=self._renorm_pj,
print_=print_result)['Pm_norm'] # calling again
result['hfss_variables'] = self.hfss_variables[variation] # just propagate
result['Ljs'] = self.Ljs[variation]
result['Q_coupling'] = self.Qm_coupling[variation]
result['Drive_phase'] = self.Phi_d[variation]
result['Qs'] = self.Qs[variation]
result['fock_trunc'] = fock_trunc
result['cos_trunc'] = cos_trunc
self.results[variation] = result
self.results.save_to_npz()
if print_result:
self.print_variation(variation)
self.print_result(result)
return result
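    # Example (hedged usage sketch): analyze one variation with numerical
    # diagonalization; omitting cos_trunc / fock_trunc keeps only the analytic
    # first-order result:
    #
    #   result = epr.analyze_variation('0', cos_trunc=8, fock_trunc=15)
    #   print(result['chi_O1'])   # anharmonicity / cross-Kerr matrix (MHz)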
def print_variation(self, variation):
if len(self.hfss_vars_diff_idx) > 0:
print('\n*** Different parameters')
print(self.hfss_variables[self.hfss_vars_diff_idx][variation], '\n')
        print('*** P (participation matrix, not normalized)')
print(self.PM[variation])
print('\n*** S (sign-bit matrix)')
print(self.SM[variation])
def print_result(self, result):
        # TODO: actually make into dataframe with mode labels and junction labels
pritm = lambda x, frmt="{:9.2g}": print_matrix(x, frmt=frmt)
print('*** P (participation matrix, normalized.)')
pritm(result['Pm_normed'])
print('\n*** Chi matrix O1 PT (MHz)\n Diag is anharmonicity, off diag is full cross-Kerr.')
pritm(result['chi_O1'], "{:9.3g}")
print('\n*** Chi matrix ND (MHz) ')
pritm(result['chi_ND'], "{:9.3g}")
print('\n*** Frequencies O1 PT (MHz)')
print(result['f_1'])
print('\n*** Frequencies ND (MHz)')
print(result['f_ND'])
print('\n*** Q_coupling')
print(result['Q_coupling'])
print('\n*** Drive phase (deg)')
print(result['Drive_phase'] * 180.0 / np.pi)
def plotting_dic_x(self, Var_dic, var_name):
dic = {}
if (len(Var_dic.keys())+1) == self.Num_hfss_vars_diff_idx:
lv, lv_str = self.get_variation_of_multiple_variables_value(Var_dic)
dic['label'] = lv_str
dic['x_label'] = var_name
dic['x'] = self.get_variable_value(var_name, lv=lv)
else:
raise ValueError('more than one hfss variable changes each time')
return lv, dic
def plotting_dic_data(self, Var_dic, var_name, data_name):
lv, dic = self.plotting_dic_x(Var_dic, var_name)
dic['y_label'] = data_name
return lv, dic
def plot_results(self, result, Y_label, variable, X_label, variations:list=None):
#TODO?
pass
def plot_Hresults(self,
sweep_variable: str = 'variation',
variations: list = None,
fig=None,
x_label: str = None):
"""Plot results versus variation
Keyword Arguments:
sweep_variable {str} -- Variable against which we swept. If none, then just
take the variation index (default: {'variation'})
variations {list} -- [description] (default: {None})
fig {[type]} -- [description] (default: {None})
Returns:
fig, axs
"""
x_label = x_label or sweep_variable
### Create figure and axes
if not fig:
fig, axs = plt.subplots(2, 2, figsize=(10, 6))
else:
axs = fig.axs
### Axis: Frequencies
ax = axs[0, 0]
ax.set_title('Modal frequencies (MHz)')
f0 = self.results.get_frequencies_HFSS(variations=variations, vs=sweep_variable)
f1 = self.results.get_frequencies_O1(variations=variations, vs=sweep_variable)
f_ND = self.results.get_frequencies_ND(variations=variations, vs=sweep_variable)
mode_idx = list(f1.index) # changed by Asaf from f0 as not all modes are always analyzed
nmodes = len(mode_idx)
cmap = cmap_discrete(nmodes)
# Which line to draw
if f_ND.empty:
plt_me_line = f1
markerf1 = 'o'
else:
plt_me_line = f_ND
markerf1 = '.'
# TODO: should move these kwargs to the config
f_ND.transpose().plot(ax=ax, lw=0, marker='o', ms=4, legend=False, zorder=30,
color=cmap)
f0.transpose().plot(ax=ax, lw=0, marker='x', ms=2, legend=False, zorder=10, color=cmap)
f1.transpose().plot(ax=ax, lw=0, marker=markerf1, ms=4, legend=False, zorder=20, color=cmap)
plt_me_line.transpose().plot(ax=ax, lw=1, alpha=0.5, color='grey', legend=False)
### Axis: Quality factors
ax = axs[1, 0]
ax.set_title('Quality factors')
Qs = self.Qs if variations is None else self.Qs[variations]
Qs.transpose().plot(ax=ax, lw=0, marker=markerf1, ms=4, legend=True, zorder=20, color=cmap)
Qs.transpose().plot(ax=ax, lw=1, alpha=0.2, color='grey', legend=False)
ax.set_yscale('log')
### Axis: Alpha and chi
axs[0][1].set_title('Anharmonicities (MHz)')
axs[1][1].set_title('Cross-Kerr frequencies (MHz)')
def plot_chi_alpha(chi, primary):
for i, m in enumerate(mode_idx):
ax = axs[0, 1]
z = sort_Series_idx(pd.Series({k: chim.loc[m, m] for k, chim in chi.items()}))
if self.results.sort_index:
try: # lazy
z.index = z.index.astype(float)
except Exception:
pass
z = z.sort_index(axis=0) # series
z.plot(ax=ax, lw=0, ms=4, label=m, color=cmap[i], marker='o' if primary else 'x')
if primary:
z.plot(ax=ax, lw=1, alpha=0.2, color='grey', label='_nolegend_')
for i, n in enumerate(mode_idx):
if int(n) > int(m):
# plot chi
ax = axs[1, 1]
z = sort_Series_idx(
pd.Series({k: chim.loc[m, n] for k, chim in chi.items()}))
if self.results.sort_index:
try:
z.index = z.index.astype(float)
except Exception:
pass
z = z.sort_index(axis=0) # series
z.plot(ax=ax, lw=0, ms=4, label=str(m)+','+str(n),
color=cmap[i], marker='o' if primary else 'x')
if primary:
z.plot(ax=ax, lw=1, alpha=0.2, color='grey', label='_nolegend_')
def do_legends():
legend_translucent(axs[0][1], leg_kw=dict(fontsize=7, title='Mode'))
legend_translucent(axs[1][1], leg_kw=dict(fontsize=7))
chiND = self.results.get_chi_ND(variations=variations, vs=sweep_variable)
chiO1 = self.results.get_chi_O1(variations=variations, vs=sweep_variable)
use_ND = not np.any([r['fock_trunc'] is None for k, r in self.results.items()])
if use_ND:
plot_chi_alpha(chiND, True)
do_legends()
plot_chi_alpha(chiO1, False)
else:
plot_chi_alpha(chiO1, True)
do_legends()
for ax1 in axs:
for ax in ax1:
ax.set_xlabel(x_label)
### Wrap up
fig.tight_layout()
return fig, axs
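# Hedged usage sketch (illustration only; `epr` and the sweep-variable name are
# assumptions, not defined in this module): assuming `epr` is an instance of the
# enclosing analysis class after the variations have been analyzed, one could do
#   fig, axs = epr.plot_Hresults(sweep_variable='Lj', variations=['0', '1'])
#   fig.savefig('hamiltonian_sweep.png')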
def extract_dic(name=None, file_name=None):
"""#name is the name of the dictionry as saved in the npz file if it is None,
the function will return a list of all dictionaries in the npz file
file name is the name of the npz file"""
with np.load(file_name, allow_pickle=True) as data:
import pandas as pd
import numpy as np
import torch
import copy
import torch.nn as nn
import matplotlib.pyplot as plt
from torchvision import transforms
from data_loader import Resizer, LungDataset
from torch.utils.data import DataLoader
from sklearn.metrics import auc
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv3d') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm3d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
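# Minimal usage sketch (illustration only; `MyConv3dNet` is a hypothetical model
# class): PyTorch applies the initializer recursively to every submodule with
#   net = MyConv3dNet()
#   net.apply(weights_init)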
def class_weight(train_path, num_tasks=3):
df = pd.read_csv(train_path, header=None)
label_list = []
for i in range(num_tasks + 1):
label_list.append(df.iloc[:, i].tolist())
class_weight_dict = {}
for i in range(len(label_list)):
labels = label_list[i]
num_classes = len(np.unique(labels))
weight_list = []
for j in range(num_classes):
count = float(labels.count(int(j)))
weight = 1 / (count / float(len(labels)))
weight_list.append(weight)
class_weight_dict[i] = torch.FloatTensor(weight_list).cuda()
return class_weight_dict
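# Worked example of the inverse-frequency weighting above (illustration only):
# for a binary task with labels [0, 0, 0, 1], class 0 has count 3 and gets
# weight 1 / (3/4) = 1.33, class 1 has count 1 and gets weight 1 / (1/4) = 4.0,
# so the rarer class contributes more to a weighted loss.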
def train_model(train_path, dataset_sizes, model, optimizer, scheduler, sub_task_weights, class_weight_dict, best_acc, dataloaders_dict, device, num_tasks=3, num_epochs=0):
best_model_wts = copy.deepcopy(model.state_dict())
train_loss = []
val_loss = []
train_acc = []
val_acc = []
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
scheduler.step()
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_loss_list = [0.0] * (num_tasks + 1)
label_corrects_list = [0.0] * (num_tasks + 1)
# Iterate over data.
for batch in dataloaders_dict[phase]:
image, labels = batch['img'].to(device), batch['label'].to(device)
labels = labels[:, 0]
# print(labels)
true_label_list = []
for i in range(num_tasks + 1):
true_label_list.append(labels[:, i].long())
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
# print(inputs)
outputs = model(image)
# if phase == 'val':
# print(outputs)
class_weight_dict = class_weight(train_path, num_tasks=3)
loss_list = []
for i in range(num_tasks + 1):
criterion_weighted = nn.CrossEntropyLoss() # pass weights to all tasks
loss_list.append(criterion_weighted(outputs[i], true_label_list[i]))
# backward + optimize only if in training phase
if phase == 'train':
sub_task_weights = sub_task_weights.to(device)
loss = 0
for i in range(num_tasks):
loss += loss_list[i] * sub_task_weights[i]
loss = (loss / num_tasks) + loss_list[-1]
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * image.size(0)
for i in range(num_tasks + 1):
running_loss_list[i] += loss_list[i].item() * image.size(0)
label_corrects_list[i] += outputs[i].argmax(dim=1).eq(true_label_list[i]).sum().item()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_loss_list = []
label_acc_list = []
for i in range(num_tasks + 1):
epoch_loss_list.append(running_loss_list[i] / dataset_sizes[phase])
label_acc_list.append(label_corrects_list[i] / dataset_sizes[phase])
if phase == 'train':
train_loss.append(epoch_loss)
train_acc.append(label_acc_list[-1])
elif phase == 'val':
val_loss.append(epoch_loss)
val_acc.append(label_acc_list[-1])
# print loss
string = '{} total loss: {:.4f} '
args = [phase, epoch_loss]
for i in range(num_tasks + 1):
string += 'label' + str(i + 1) + '_loss: {:.4f} '
args.append(epoch_loss_list[i])
print(string.format(*args))
# print accuracy
string_2 = '{} '
args_2 = [phase]
for i in range(num_tasks + 1):
string_2 += 'label' + str(i + 1) + '_Acc: {:.4f} '
args_2.append(label_acc_list[i])
print(string_2.format(*args_2))
# deep copy the model
if phase == 'val' and label_acc_list[-1] > best_acc:
print('saving with acc of {}'.format(label_acc_list[-1]),
'improved over previous {}'.format(best_acc))
best_acc = label_acc_list[-1]
best_model_wts = copy.deepcopy(model.state_dict())
best_acc_size = label_acc_list[0]
best_acc_consistency = label_acc_list[1]
best_acc_margin = label_acc_list[2]
print('Best val acc: {:4f}'.format(float(best_acc)))
best_fold_acc = best_acc
best_fold_acc_size = best_acc_size
best_fold_acc_consistency = best_acc_consistency
best_fold_acc_margin = best_acc_margin
# load best model weights
model.load_state_dict(best_model_wts)
return model, train_loss, val_loss, train_acc, val_acc, best_fold_acc, best_fold_acc_size, best_fold_acc_consistency, best_fold_acc_margin
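# Note on the training objective above (restating the code, not new behaviour):
# with num_tasks = 3 the optimized loss is
#   loss = (w1*L1 + w2*L2 + w3*L3) / 3 + L_main
# i.e. the weighted sub-task losses are averaged and the main-task loss
# (the last output head) is added unweighted.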
def plot_loss(train_loss, val_loss, train_acc, val_acc):
plt.figure()
plt.plot(train_loss, label='train loss')
plt.plot(val_loss, label='val loss')
plt.plot(train_acc, label='train acc')
plt.plot(val_acc, label='val acc')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
return plt.show()
def classify_image(model, val_path, train_indices, test_indices,record_id, device):
# set parameters to 0
tp = 0
tn = 0
fp = 0
fn = 0
true_label_list = []
score_list = []
# load image
test_path = cv_data(train_indices, test_indices, record_id, record_id)[-1]
val_data = LungDataset(val_path, transform=transforms.Compose([Resizer()]))
val_loader = DataLoader(val_data, shuffle=True, num_workers=4, batch_size=1)
model.eval()
for batch in val_loader:
image, labels = batch['img'].to(device), batch['label'].to(device)
labels = labels[:, -1]
true_label = int(labels[-1][-1].cpu().numpy()) # select label 4
true_label_list.append(true_label)
# print(true_label)
with torch.no_grad():
# make prediction
pred = model(image)
score = pred[-1].tolist()[0][true_label]
# print(score)
# print(pred[-1])
score_list.append(score)
pred_label = pred[-1].argmax(dim=1)
# print(pred_label)
# make classification
if pred_label > 0.5:
pred_label = 1
else:
pred_label = 0
# update parameters
if pred_label == 1 and true_label == 1:
tp += 1
elif pred_label == 0 and true_label == 0:
tn += 1
elif pred_label == 1 and true_label == 0:
fp += 1
elif pred_label == 0 and true_label == 1:
fn += 1
else:
print('ERROR')
print('tp:', tp, 'tn:', tn, 'fp:', fp, 'fn:', fn)
return tp, tn, fp, fn, true_label_list, score_list
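# Helper added for illustration only (not part of the original pipeline): it
# derives common metrics from the confusion counts returned by classify_image.
def confusion_metrics(tp, tn, fp, fn):
    total = max(tp + tn + fp + fn, 1)
    accuracy = (tp + tn) / total
    sensitivity = tp / max(tp + fn, 1)  # recall of the positive class
    specificity = tn / max(tn + fp, 1)
    return accuracy, sensitivity, specificity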
def cv_data(train_indices, test_indices, record_id, all_data):
train_path_list = []
train_label_list_1 = []
train_label_list_2 = []
train_label_list_3 = []
train_label_list_4 = []
test_path_list = []
test_label_list_1 = []
test_label_list_2 = []
test_label_list_3 = []
test_label_list_4 = []
for i in range(len(train_indices)):
train_patch_path_original = record_id[train_indices[i]]
train_patch_path_flipped_x = train_patch_path_original.split('.')[0] + '_flipped_x.npy'
train_patch_path_flipped_y = train_patch_path_original.split('.')[0] + '_flipped_y.npy'
train_patch_path_flipped_z = train_patch_path_original.split('.')[0] + '_flipped_z.npy'
train_path_list.append(train_patch_path_original)
train_label_list_1.append(int(all_data['label1'][all_data['path'] == record_id[train_indices[i]]].values))
train_label_list_2.append(int(all_data['label2'][all_data['path'] == record_id[train_indices[i]]].values))
train_label_list_3.append(int(all_data['label3'][all_data['path'] == record_id[train_indices[i]]].values))
train_label_list_4.append(int(all_data['label4'][all_data['path'] == record_id[train_indices[i]]].values))
# add paths and labels of augmented patches to csv
train_path_list.append(train_patch_path_flipped_x)
train_label_list_1.append(int(all_data['label1'][all_data['path'] == record_id[train_indices[i]]].values))
train_label_list_2.append(int(all_data['label2'][all_data['path'] == record_id[train_indices[i]]].values))
train_label_list_3.append(int(all_data['label3'][all_data['path'] == record_id[train_indices[i]]].values))
train_label_list_4.append(int(all_data['label4'][all_data['path'] == record_id[train_indices[i]]].values))
train_path_list.append(train_patch_path_flipped_y)
train_label_list_1.append(int(all_data['label1'][all_data['path'] == record_id[train_indices[i]]].values))
train_label_list_2.append(int(all_data['label2'][all_data['path'] == record_id[train_indices[i]]].values))
train_label_list_3.append(int(all_data['label3'][all_data['path'] == record_id[train_indices[i]]].values))
train_label_list_4.append(int(all_data['label4'][all_data['path'] == record_id[train_indices[i]]].values))
train_path_list.append(train_patch_path_flipped_z)
train_label_list_1.append(int(all_data['label1'][all_data['path'] == record_id[train_indices[i]]].values))
train_label_list_2.append(int(all_data['label2'][all_data['path'] == record_id[train_indices[i]]].values))
train_label_list_3.append(int(all_data['label3'][all_data['path'] == record_id[train_indices[i]]].values))
train_label_list_4.append(int(all_data['label4'][all_data['path'] == record_id[train_indices[i]]].values))
for i in range(len(test_indices)):
test_path_list.append(record_id[test_indices[i]])
test_label_list_1.append(int(all_data['label1'][all_data['path'] == record_id[test_indices[i]]].values))
test_label_list_2.append(int(all_data['label2'][all_data['path'] == record_id[test_indices[i]]].values))
test_label_list_3.append(int(all_data['label3'][all_data['path'] == record_id[test_indices[i]]].values))
test_label_list_4.append(int(all_data['label4'][all_data['path'] == record_id[test_indices[i]]].values))
df_train = pd.DataFrame({'path': train_path_list,
'label1': train_label_list_1,
'label2': train_label_list_2,
'label3': train_label_list_3,
'label4': train_label_list_4})
df_test = pd.DataFrame({'path': test_path_list,
'label1': test_label_list_1,
'label2': test_label_list_2,
'label3': test_label_list_3,
'label4': test_label_list_4})
df_train.to_csv('path', header=False, index=False)
df_test.to_csv('path', header=False, index=False)
train_path = 'path'
val_path = 'path'
return train_path, val_path
def roc_curves(tprs, mean_fpr, aucs):
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
from __future__ import print_function
import numpy as np
import numpy.testing as npt
import pyiacsun
def test_fsvd():
A = np.array([[1,2,3,4],[3,5,6,7],[2,8,3,1]])
U2, S2, V2 = np.linalg.svd(A)
U, S, V = pyiacsun.linalg.fsvd(A, 9, 3, usePowerMethod=True)
npt.assert_allclose(S, S2)
def test_cholinvert():
A = np.array([[4,12,-16],[12,37,-43],[-16,-43,98]])
AInv = np.linalg.inv(A)
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the intersection-over-union metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.nn.metric import intersection_over_union
from tensorflow_graphics.util import test_case
def random_tensor(tensor_shape):
return np.random.uniform(low=0.0, high=1.0, size=tensor_shape)
def random_tensor_shape():
tensor_size = np.random.randint(5) + 1
return np.random.randint(1, 10, size=(tensor_size)).tolist()
class IntersectionOverUnionTest(test_case.TestCase):
@parameterized.parameters(
# 1 dimensional grid.
([1., 0, 0, 1, 1, 0, 1], \
[1., 0, 1, 1, 1, 1, 0], 3. / 6.),
# 2 dimensional grid.
([[1., 0, 1], [0, 0, 1], [0, 1, 1]], \
[[0., 1, 1], [1, 1, 1], [0, 0, 1]], 3. / 8.),
([[0., 0, 1], [0, 0, 0]], \
[[1., 1, 0], [0, 0, 1]], 0.),
# Returns 1 if the prediction and ground-truth are all zeros.
([[0., 0, 0], [0, 0, 0]], \
[[0., 0, 0], [0, 0, 0]], 1.),
)
def test_evaluate_preset(self, ground_truth, predictions, expected_iou):
tensor_shape = random_tensor_shape()
grid_size = np.array(ground_truth).ndim
ground_truth_labels = np.tile(ground_truth, tensor_shape + [1] * grid_size)
"""
This code is for illustration purpose only.
Use multi_agent.py for better performance and speed.
"""
import os
import numpy as np
import tensorflow as tf
# import env
import fixed_env as env
import a3c
import load_trace
S_INFO = 6 # bit_rate, buffer_size, next_chunk_size, bandwidth_measurement(throughput and time), chunk_til_video_end
S_LEN = 8 # take how many frames in the past
A_DIM = 6
ACTOR_LR_RATE = 0.0001
CRITIC_LR_RATE = 0.001
TRAIN_SEQ_LEN = 100 # take as a train batch
MODEL_SAVE_INTERVAL = 100
VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] # Kbps
BUFFER_PARAMETER=[10,20,30,40]
BUFFER_NORM_FACTOR = 10.0
CHUNK_TIL_VIDEO_END_CAP = 48.0
M_IN_K = 1000.0
REBUF_PENALTY = 4.3 # 1 sec rebuffering -> 3 Mbps
SMOOTH_PENALTY = 1
DEFAULT_QUALITY = 1 # default video quality without agent
RANDOM_SEED = 42
RAND_RANGE = 1000000
GRADIENT_BATCH_SIZE = 16
SUMMARY_DIR = './results'
LOG_FILE = './results/log'
# log in format of time_stamp bit_rate buffer_size rebuffer_time chunk_size download_time reward
NN_MODEL = None
def main():
np.random.seed(RANDOM_SEED)
assert len(VIDEO_BIT_RATE) == A_DIM
all_cooked_time, all_cooked_bw, _ = load_trace.load_trace()
#print(all_cooked_bw)
if not os.path.exists(SUMMARY_DIR):
os.makedirs(SUMMARY_DIR)
net_env = env.Environment(all_cooked_time=all_cooked_time,
all_cooked_bw=all_cooked_bw)
with tf.Session() as sess, open(LOG_FILE, 'w') as log_file:
actor = a3c.ActorNetwork(sess,
state_dim=[S_INFO, S_LEN], action_dim=A_DIM,
learning_rate=ACTOR_LR_RATE)
critic = a3c.CriticNetwork(sess,
state_dim=[S_INFO, S_LEN],
learning_rate=CRITIC_LR_RATE)
summary_ops, summary_vars = a3c.build_summaries()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph) # training monitor
saver = tf.train.Saver() # save neural net parameters
# restore neural net parameters
nn_model = NN_MODEL
if nn_model is not None: # nn_model is the path to file
saver.restore(sess, nn_model)
print("Model restored.")
epoch = 0
time_stamp = 0
last_bit_rate = DEFAULT_QUALITY
bit_rate = DEFAULT_QUALITY
action_vec = np.zeros(A_DIM)
action_vec[bit_rate] = 1
s_batch = [np.zeros((S_INFO, S_LEN))]
a_batch = [action_vec]
r_batch = []
entropy_record = []
actor_gradient_batch = []
critic_gradient_batch = []
while True: # serve video forever
# the action is from the last decision
# this is to make the framework similar to the real
delay, sleep_time, buffer_size, rebuf, \
video_chunk_size, next_video_chunk_sizes, \
end_of_video,video_chunk_counter,throughput, video_chunk_remain = \
net_env.get_video_chunk(bit_rate)
#print(net_env.get_video_chunk(bit_rate))
time_stamp += delay # in ms
time_stamp += sleep_time # in ms
# reward is video quality - rebuffer penalty - smooth penalty
reward = VIDEO_BIT_RATE[bit_rate] / M_IN_K \
- REBUF_PENALTY * rebuf \
- SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[bit_rate] -
VIDEO_BIT_RATE[last_bit_rate]) / M_IN_K
r_batch.append(reward)
last_bit_rate = bit_rate
# retrieve previous state
if len(s_batch) == 0:
state = [np.zeros((S_INFO, S_LEN))]
else:
state = np.array(s_batch[-1], copy=True)
# print(state)
# dequeue history record
state = np.roll(state, -1, axis=1)
print('state',state)
# this should be S_INFO number of terms
state[0, -1] = VIDEO_BIT_RATE[bit_rate] / float(np.max(VIDEO_BIT_RATE)) # last quality
state[1, -1] = buffer_size / BUFFER_NORM_FACTOR # 10 sec
state[2, -1] = float(video_chunk_size) / float(delay) / M_IN_K # kilo byte / ms
state[3, -1] = float(delay) / M_IN_K / BUFFER_NORM_FACTOR # 10 sec
state[4, :A_DIM] = np.array(next_video_chunk_sizes) / M_IN_K / M_IN_K # mega byte
state[5, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)
action_prob = actor.predict(np.reshape(state, (1, S_INFO, S_LEN)))
action_cumsum = np.cumsum(action_prob)
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
class KNN(object):
def fit(self, x, y, k=3):
self.x = x
self.y = y
self.k = k
def classify(self, point):
nearPosition = []
nearClass = []
amountNearClass = []
dist = self.calculateEuclidianDistance(self.x, point)
distOrdered = sorted(dist)
for i in range(self.k):
for j in range(len(dist)):
if distOrdered[i] == dist[j]:
nearPosition.append(j)
dist[j] = -1
for i in range(0, self.k):
nearClass.append(self.y[nearPosition[i]])
unique, counts = np.unique(nearClass, return_counts=True)
countClass = dict(zip(unique, counts))
max = -10
pointClass = -1
for key, number in countClass.items():
if max < number:
max = number
pointClass = key
return pointClass
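# Hedged usage sketch (toy data, illustration only; assumes the distance below
# is the usual Euclidean distance):
#   knn = KNN()
#   knn.fit([[0, 0], [0, 1], [1, 1]], [0, 0, 1], k=1)
#   knn.classify([1.0, 0.9])  # expected to return 1, nearest sample is [1, 1]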
def calculateEuclidianDistance(self,x, point):
dist = np.array(x)
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plot
import matplotlib.patches as mpatches
import sys, itertools
from scipy import stats
from BernoulliMixture import BernoulliMixture
import externalpaths
sys.path.append(externalpaths.arcplot())
import arcPlot
sys.path.append(externalpaths.ringmapper())
from ReactivityProfile import ReactivityProfile
class Cluster(object):
def __init__(self, inpfile=None):
self.p = None
self.rawprofiles = None
if inpfile is not None:
self.readfile(inpfile)
def readfile(self, inpfile):
self._readBMfile(inpfile)
def _readBMfile(self, inpfile):
bm = BernoulliMixture()
bm.readModelFromFile(inpfile, True)
self.p = bm.p
self.rawprofiles = bm.mu
self.inactive_columns = bm.inactive_columns
self.invalid_columns = np.where(bm.mu[0,:]<0)[0]
if self.inactive_columns is None:
self.inactive_columns = []
if self.invalid_columns is None:
self.invalid_columns = []
#for i in bm.inactive_columns:
# self.rawprofiles[:,i] = np.nan
self.sortByPopulation()
def sortByPopulation(self):
idx = list(range(len(self.p)))
idx.sort(key=lambda x: self.p[x], reverse=True)
self.p = self.p[idx]
self.rawprofiles = self.rawprofiles[idx,:]
def alignModel(self, clust2):
if self.p.shape[0] > clust2.p.shape[0]:
raise ValueError('Ref Cluster must have lower dimension than comparison Cluster')
actlist = np.ones(self.rawprofiles.shape[1], dtype=bool)
with np.errstate(invalid='ignore'):
for i in range(len(self.p)):
actlist = actlist & np.isfinite(self.rawprofiles[i,:]) & (self.rawprofiles[i,:]>-1)
for i in range(len(clust2.p)):
actlist = actlist & np.isfinite(clust2.rawprofiles[i,:]) & (clust2.rawprofiles[i,:]>-1)
mindiff = 1000
for idx in itertools.permutations(range(len(clust2.p))):
ridx = idx[:self.p.shape[0]]
d = self.rawprofiles - clust2.rawprofiles[ridx,]
rmsdiff = np.square(d[:, actlist])
rmsdiff = np.sqrt( np.mean(rmsdiff) )
if rmsdiff < mindiff:
minidx = idx
mindiff = rmsdiff
return minidx
def returnMax(self):
ave = np.zeros(self.rawprofiles.shape[1])
for i in range(self.p.shape[0]):
ave += self.p[i]*self.rawprofiles[i,:]
p99 = np.percentile(ave[np.isfinite(ave)], 99)
p100 = np.percentile(ave[np.isfinite(ave)], 100)
return p100, p99, np.max(self.rawprofiles[np.isfinite(self.rawprofiles)])
class RPCluster(object):
def __init__(self, fname=None):
if fname is not None:
self.readReactivities(fname)
def readReactivities(self, fname):
with open(fname) as inp:
ncomp = int(inp.readline().split()[0])
nt = []
seq = []
bg = []
norm = [[] for x in range(ncomp)]
raw = [[] for x in range(ncomp)]
inactives = []
population = np.array(list(map(float, inp.readline().split()[1:])))
inp.readline()
for line in inp:
spl = line.split()
nt.append(int(spl[0]))
seq.append(spl[1])
for i in range(ncomp):
norm[i].append(float(spl[2+2*i]))
raw[i].append(float(spl[3+2*i]))
if spl[-1] == 'i':
inactives.append(int(spl[0])-1)
bg.append(float(spl[4+2*i]))
bg = np.array(bg)
norm = np.array(norm)
raw = np.array(raw)
nts = np.array(nt)
seq = np.array(seq)
profiles = []
for i in range(norm.shape[0]):
p = ReactivityProfile()
p.sequence = seq
p.nts = nts
p.normprofile = norm[i,:]
p.rawprofile = raw[i,:]
p.backprofile = bg
p.backgroundSubtract(normalize=False)
profiles.append(p)
self.population = population
self.profiles = profiles
self.nclusts = self.population.shape[0]
self.inactive_columns = inactives
self.invalid_columns = []
def renormalize(self):
for i in range(len(self.profiles)):
self.profiles[i].normalize(DMS=True)
def plotHist(self, name, nt, ax):
data = []
seqmask = self.profiles[0].sequence==nt
for i in range(len(self.profiles)):
react = self.profiles[i].profile(name)
react = react[seqmask]
react = react[np.isfinite(react)]
"""Transformers
This module contains transformers for preprocessing data. Most operate on DataFrames and are named appropriately.
"""
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn.preprocessing import StandardScaler
#------------------------------------------------------
#from math import sqrt
#from sklearn.preprocessing import LabelEncoder, Imputer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
#from sklearn.linear_model import LinearRegression, LogisticRegression
#from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
#from sklearn.metrics import mean_squared_error
from healthcareai.common.healthcareai_error import HealthcareAIError
SUPPORTED_IMPUTE_STRATEGY = ['MeanMedian', 'RandomForest']
#-----------------------------------------------------
###############################################################################
class DataFrameImputer( TransformerMixin ):
"""
Impute missing values in a dataframe.
Columns of dtype object or category (assumed categorical) are imputed with the mode (most frequent value in column).
Columns of other types (assumed continuous) are imputed with mean of column.
"""
def __init__(self, excluded_columns=None, impute=True, verbose=True, imputeStrategy='MeanMedian' ):
self.impute = impute
self.object_columns = None
self.fill = None
self.verbose = verbose
self.impute_Object = None
self.excluded_columns = excluded_columns
self.imputeStrategy = imputeStrategy
def fit(self, X, y=None):
if self.impute is False:
return self
if ( self.imputeStrategy=='MeanMedian' or self.imputeStrategy==None ):
# Grab list of object column names before doing imputation
self.object_columns = X.select_dtypes(include=['object']).columns.values
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O')
or pd.api.types.is_categorical_dtype(X[c])
else X[c].mean() for c in X], index=X.columns)
if self.verbose:
num_nans = sum(X.select_dtypes(include=[np.number]).isnull().sum())
num_total = sum(X.select_dtypes(include=[np.number]).count())
percentage_imputed = num_nans / num_total * 100
print("Percentage Imputed: %.2f%%" % percentage_imputed)
print("Note: Impute will always happen on prediction dataframe, otherwise rows are dropped, and will lead "
"to missing predictions")
# return self for scikit compatibility
return self
elif ( self.imputeStrategy=='RandomForest' ):
self.impute_Object = DataFrameImputerRandomForest( excluded_columns=self.excluded_columns )
self.impute_Object.fit(X)
return self
elif ( self.imputeStrategy=='BaggedTree' ):
self.impute_Object = DataFrameImputerBaggedTree()
self.impute_Object.fit(X)
return self
else:
raise HealthcareAIError('An imputeStrategy must be one of these types: {}'.format(SUPPORTED_IMPUTE_STRATEGY))
def transform(self, X, y=None):
# Return if not imputing
if self.impute is False:
return X
if ( self.imputeStrategy=='MeanMedian' or self.imputeStrategy==None ):
result = X.fillna(self.fill)
for i in self.object_columns:
if result[i].dtype not in ['object', 'category']:
result[i] = result[i].astype('object')
return result
elif ( self.imputeStrategy=='RandomForest' ):
result = self.impute_Object.transform(X)
return result
elif ( self.imputeStrategy=='BaggedTree' ):
result = self.impute_Object.transform(X)
return result
else:
raise HealthcareAIError('An imputeStrategy must be one of these types: {}'.format(SUPPORTED_IMPUTE_STRATEGY))
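# Minimal usage sketch (illustration only; `df` is a hypothetical DataFrame):
#   imputer = DataFrameImputer(imputeStrategy='MeanMedian')
#   imputer.fit(df)
#   df_filled = imputer.transform(df)
# Object/category columns are filled with the column mode and numeric columns
# with the column mean, mirroring the fit() logic above.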
class DataFrameImputerBaggedTree(TransformerMixin):
"""
Impute missing values in a dataframe.
"""
def __init__(self, impute=True, verbose=True):
self.impute = impute
self.object_columns = None
self.fill = None
self.verbose = verbose
def fit(self, X, y=None):
# Return if not imputing
if self.impute is False:
return self
# Grab list of object column names before doing imputation
self.object_columns = X.select_dtypes(include=['object']).columns.values
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O')
else X[c].mean() for c in X], index=X.columns)
import time
import yaml
import wget
import cv2
from utils import *
from base_models import AlexNet, C3DNet, convert_to_fcn, C3DNet2
from base_models import I3DNet
from tensorflow.keras.layers import Input, Concatenate, Dense
from tensorflow.keras.layers import GRU, LSTM, GRUCell
from tensorflow.keras.layers import Dropout, LSTMCell, RNN
from tensorflow.keras.utils import plot_model
from tensorflow.keras.layers import Flatten, Average, Add
from tensorflow.keras.layers import ConvLSTM2D, Conv2D
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from tensorflow.keras.applications import vgg16, resnet50
from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Lambda, dot, concatenate, Activation
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras import regularizers
from tensorflow.keras import backend as K
from tensorflow.keras.utils import Sequence
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
## For deeplabV3 (segmentation)
import numpy as np
from PIL import Image
import matplotlib
import tensorflow as tf
from matplotlib import gridspec
from matplotlib import pyplot as plt
import tarfile
import os
import time
import scipy.misc
import cv2
# from tensorflow.compat.v1 import ConfigProto
# from tensorflow.compat.v1 import InteractiveSession
# config = ConfigProto()
# config.gpu_options.allow_growth = True
# session = InteractiveSession(config=config)
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg19 import preprocess_input
from tensorflow.keras.models import Model
import numpy as np
###############################################
class DeepLabModel(object):
"""Class to load deeplab model and run inference."""
INPUT_TENSOR_NAME = 'ImageTensor:0'
OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'
INPUT_SIZE = 513
FROZEN_GRAPH_NAME = 'frozen_inference_graph'
def __init__(self, tarball_path):
"""Creates and loads pretrained deeplab model."""
self.graph = tf.Graph()
graph_def = None
# Extract frozen graph from tar archive.
tar_file = tarfile.open(tarball_path)
for tar_info in tar_file.getmembers():
if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):
file_handle = tar_file.extractfile(tar_info)
graph_def = tf.compat.v1.GraphDef.FromString(file_handle.read())
break
tar_file.close()
if graph_def is None:
raise RuntimeError('Cannot find inference graph in tar archive.')
with self.graph.as_default():
tf.import_graph_def(graph_def, name='')
self.sess = tf.compat.v1.Session(graph=self.graph)
def run(self, image):
"""Runs inference on a single image.
Args:
image: A PIL.Image object, raw input image.
Returns:
resized_image: RGB image resized from original input image.
seg_map: Segmentation map of `resized_image`.
"""
width, height = image.size
resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)
target_size = (int(resize_ratio * width), int(resize_ratio * height))
resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)
batch_seg_map = self.sess.run(
self.OUTPUT_TENSOR_NAME,
feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
seg_map = batch_seg_map[0]
return resized_image, seg_map
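# Hedged usage sketch (illustration only; the tarball path is an assumption,
# no pretrained frozen graph is shipped with this file):
#   model = DeepLabModel('deeplab_model.tar.gz')
#   resized_image, seg_map = model.run(Image.open('frame.png'))
# `seg_map` holds one semantic label id per pixel of the resized image.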
def create_cityscapes_label_colormap():
"""Creates a label colormap used in CITYSCAPES segmentation benchmark.
Returns:
A colormap for visualizing segmentation results.
"""
# colormap = np.zeros((256, 3), dtype=np.uint8)
# colormap[0] = [128, 64, 128]
# colormap[1] = [244, 35, 232]
# colormap[2] = [70, 70, 70]
# colormap[3] = [102, 102, 156]
# colormap[4] = [190, 153, 153]
# colormap[5] = [153, 153, 153]
# colormap[6] = [250, 170, 30]
# colormap[7] = [220, 220, 0]
# colormap[8] = [107, 142, 35]
# colormap[9] = [152, 251, 152]
# colormap[10] = [70, 130, 180]
# colormap[11] = [220, 20, 60]
# colormap[12] = [255, 0, 0]
# colormap[13] = [0, 0, 142]
# colormap[14] = [0, 0, 70]
# colormap[15] = [0, 60, 100]
# colormap[16] = [0, 80, 100]
# colormap[17] = [0, 0, 230]
# colormap[18] = [119, 11, 32]
# return colormap
colormap = np.zeros((256, 3), dtype=np.uint8)
from keras.layers import Input, Reshape, Dropout, Dense, Flatten, BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model, load_model
#from keras.optimizers import Adam
from tensorflow.keras.optimizers import Adam
import numpy as np
from PIL import Image
import os
# Preview image Frame
PREVIEW_ROWS = 4
PREVIEW_COLS = 7
PREVIEW_MARGIN = 4
SAVE_FREQ = 100
# Size vector to generate images from
NOISE_SIZE = 100
# Configuration
EPOCHS = 10000 # number of iterations
BATCH_SIZE = 32
GENERATE_RES = 3
IMAGE_SIZE = 128 # rows/cols
IMAGE_CHANNELS = 3
#training_data = np.load('/mnt/cubism_data.npy')
print(os.getcwd())
training_data = np.load(os.path.join('/mnt', 'cubism_data.npy'))
def build_discriminator(image_shape):
model = Sequential()
model.add(Conv2D(32, kernel_size=3, strides=2,
input_shape=image_shape, padding='same'))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=3, strides=2, padding='same'))
model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=3, strides=2, padding='same'))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(256, kernel_size=3, strides=1, padding='same'))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(512, kernel_size=3, strides=1, padding='same'))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
input_image = Input(shape=image_shape)
validity = model(input_image)
return Model(input_image, validity)
def build_generator(noise_size, channels):
model = Sequential()
model.add(Dense(4 * 4 * 256, activation='relu', input_dim=noise_size))
model.add(Reshape((4, 4, 256)))
model.add(UpSampling2D())
model.add(Conv2D(256, kernel_size=3, padding='same'))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation('relu'))
model.add(UpSampling2D())
model.add(Conv2D(256, kernel_size=3, padding='same'))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation('relu'))
for i in range(GENERATE_RES):
model.add(UpSampling2D())
model.add(Conv2D(256, kernel_size=3, padding='same'))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation('relu'))
model.summary()
model.add(Conv2D(channels, kernel_size=3, padding='same'))
model.add(Activation('tanh'))
input = Input(shape=(noise_size,))
generated_image = model(input)
return Model(input, generated_image)
def save_images(cnt, noise):
image_array = np.full((
PREVIEW_MARGIN + (PREVIEW_ROWS * (IMAGE_SIZE + PREVIEW_MARGIN)),
PREVIEW_MARGIN + (PREVIEW_COLS * (IMAGE_SIZE + PREVIEW_MARGIN)), 3),
255, dtype=np.uint8)
generated_images = generator.predict(noise)
generated_images = 0.5 * generated_images + 0.5
image_count = 0
for row in range(PREVIEW_ROWS):
for col in range(PREVIEW_COLS):
r = row * (IMAGE_SIZE + PREVIEW_MARGIN) + PREVIEW_MARGIN
c = col * (IMAGE_SIZE + PREVIEW_MARGIN) + PREVIEW_MARGIN
image_array[r:r + IMAGE_SIZE, c:c + IMAGE_SIZE] = generated_images[image_count] * 255
image_count += 1
output_path = 'output'
if not os.path.exists(output_path):
os.makedirs(output_path)
filename = os.path.join(output_path, f'trained-{cnt}.png')
im = Image.fromarray(image_array)
im.save(filename)
image_shape = (IMAGE_SIZE, IMAGE_SIZE, IMAGE_CHANNELS)
optimizer = Adam(1.5e-4, 0.5)
discriminator = build_discriminator(image_shape)
discriminator.compile(loss='binary_crossentropy',optimizer=optimizer, metrics=['accuracy'])
generator = build_generator(NOISE_SIZE, IMAGE_CHANNELS)
random_input = Input(shape=(NOISE_SIZE,))
generated_image = generator(random_input)
discriminator.trainable = False
validity = discriminator(generated_image)
combined = Model(random_input, validity)
combined.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
y_real = np.ones((BATCH_SIZE, 1))
y_fake = np.zeros((BATCH_SIZE, 1))
fixed_noise = np.random.normal(0, 1, (PREVIEW_ROWS * PREVIEW_COLS, NOISE_SIZE))
cnt = 1
for epoch in range(EPOCHS):
idx = np.random.randint(0, training_data.shape[0], BATCH_SIZE)
x_real = training_data[idx]
noise = np.random.normal(0, 1, (BATCH_SIZE, NOISE_SIZE))
# 2021.03.20
# @yifan
#
import numpy as np
from skimage.util import view_as_windows
from scipy.fftpack import dct, idct
def Shrink(X, win):
X = view_as_windows(X, (1,win,win,1), (1,win,win,1))
return X.reshape(X.shape[0], X.shape[1], X.shape[2], -1)
def invShrink(X, win):
S = X.shape
X = X.reshape(S[0], S[1], S[2], -1, 1, win, win, 1)
X = np.moveaxis(X, 5, 2)
X = np.moveaxis(X, 6, 4)
return X.reshape(S[0], win*S[1], win*S[2], -1)
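# Round-trip sanity sketch for Shrink/invShrink (illustration only): with
# non-overlapping windows the two functions are exact inverses of each other.
#   x = np.arange(64, dtype=np.float32).reshape(1, 8, 8, 1)
#   patches = Shrink(x, win=4)          # shape (1, 2, 2, 16)
#   assert np.allclose(invShrink(patches, win=4), x)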
class DCT():
def __init__(self, N=8, P=8):
self.N = N
self.P = P
self.W = 8
self.H = 8
def transform(self, a):
S = list(a.shape)
a = a.reshape(-1, self.N, self.P, 1)
a = dct(dct(a, axis=1, norm='ortho'), axis=2, norm='ortho')
return a.reshape(S)
def inverse_transform(self, a):
S = list(a.shape)
a = a.reshape(-1, self.N, self.P, 1)
a = idct(idct(a, axis=1, norm='ortho'), axis=2, norm='ortho')
return a.reshape(S)
def ML_inverse_transform(self, Xraw, X):
llsr = LLSR(onehot=False)
llsr.fit(X.reshape(-1, X.shape[-1]), Xraw.reshape(-1, X.shape[-1]))
S = X.shape
X = llsr.predict_proba(X.reshape(-1, X.shape[-1])).reshape(S)
return X
class ZigZag():
def __init__(self):
self.idx = []
def zig_zag(self, i, j, n):
if i + j >= n:
return n * n - 1 - self.zig_zag(n - 1 - i, n - 1 - j, n)
k = (i + j) * (i + j + 1) // 2
return k + i if (i + j) & 1 else k + j
def zig_zag_getIdx(self, N):
idx = np.zeros((N, N))
for i in range(N):
for j in range(N):
idx[i, j] = self.zig_zag(i, j, N)
return idx.reshape(-1)
def transform(self, X):
self.idx = self.zig_zag_getIdx((int)(np.sqrt(X.shape[-1]))).astype('int32')
S = list(X.shape)
X = X.reshape(-1, X.shape[-1])
return X[:, np.argsort(self.idx)].reshape(S)
def inverse_transform(self, X):
self.idx = self.zig_zag_getIdx((int)(np.sqrt(X.shape[-1]))).astype('int32')
S = list(X.shape)
X = X.reshape(-1, X.shape[-1])
return X[:, self.idx].reshape(S)
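# Worked example (illustration only): for a 3x3 block, zig_zag_getIdx(3)
# produces the standard zig-zag scan order
#   [[0, 1, 5],
#    [2, 4, 6],
#    [3, 7, 8]]
# flattened to [0, 1, 5, 2, 4, 6, 3, 7, 8], which transform()/inverse_transform()
# use to reorder the last axis of the coefficient tensor.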
class LLSR():
def __init__(self, onehot=True, normalize=False):
self.onehot = onehot
self.normalize = normalize
self.weight = []
def fit(self, X, Y):
if self.onehot == True:
Y = np.eye(len(np.unique(Y)))[Y.reshape(-1)]
A = np.ones((X.shape[0], 1))
import os
import numpy as np
import random
from enum import IntEnum
import matplotlib.pyplot as plt
import gym
from gym import error, spaces
from gym.utils import seeding
class GoalGridWorldEnv(gym.GoalEnv):
"""
A simple 2D grid world environment with goal-oriented reward.
Compatible with the OpenAI GoalEnv class.
Observations are a dict of 'observation', 'achieved_goal', and 'desired goal'
"""
class Actions(IntEnum):
# Move
left = 0
down = 1
right = 2
up = 3
class ObjectTypes(IntEnum):
# Object types
empty = 0
agent = 1
wall = 2
lava = 3
MOVE_DIRECTION = [[0,-1],[1,0],[0,1],[-1,0]] # up, right, down, left
def __init__(self, grid_size=16, max_step=100, grid_file=None, random_init_loc=True, \
agent_loc_file=None, goal_file=None, seed=1337):
# Action enumeration
self.actions = GoalGridWorldEnv.Actions
# Actions are discrete integer values
self.action_space = spaces.Discrete(len(self.actions))
# Object types
self.objects = GoalGridWorldEnv.ObjectTypes
# Whether to change initialization of the agent
self.random_init_loc = random_init_loc
# Environment configuration
self.grid_size = grid_size
self.max_step = max_step
self.end_of_game = False
if grid_file:
curr_abs_path = os.path.dirname(os.path.abspath(__file__))
rel_path = os.path.join(curr_abs_path, "grid_samples", grid_file)
if os.path.exists(rel_path):
grid_file = rel_path
self.grid = np.loadtxt(grid_file, delimiter=',')
# Overwrite grid size if necessary
self.grid_size_0 = self.grid.shape[0]
self.grid_size_1 = self.grid.shape[1]
else:
print("Cannot find path: {}".format(rel_path))
else:
# Generate an empty grid (set both grid dimensions from the requested size)
self.grid_size_0 = self.grid_size_1 = self.grid_size
self.grid = np.zeros((self.grid_size_0, self.grid_size_1), dtype=np.int)
# Sample the agent
self.goal_loc = self._sample_goal_loc()
self.goal = np.copy(self.grid)
# coding=utf-8
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import itertools
import math
import os
import random
# GOOGLE-INITIALIZATION
import apache_beam as beam
from apache_beam.testing import util as beam_test_util
import numpy as np
import six
from six.moves import range
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_transform import analyzers
from tensorflow_transform import schema_inference
from tensorflow_transform.beam import impl as beam_impl
from tensorflow_transform.beam import tft_unit
from tensorflow_transform.beam.tft_beam_io import transform_fn_io
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.python.ops import lookup_ops
from tensorflow_metadata.proto.v0 import schema_pb2
_SCALE_TO_Z_SCORE_TEST_CASES = [
dict(testcase_name='int16',
input_data=np.array([[1], [1], [2], [2]], np.int16),
output_data=np.array([[-1.0], [-1.0], [1.0], [1.0]], np.float32),
elementwise=False),
dict(testcase_name='int32',
input_data=np.array([[1], [1], [2], [2]], np.int32),
output_data=np.array([[-1.0], [-1.0], [1.0], [1.0]], np.float32),
elementwise=False),
dict(testcase_name='int64',
input_data=np.array([[1], [1], [2], [2]], np.int64),
output_data=np.array([[-1.0], [-1.0], [1.0], [1.0]], np.float32),
elementwise=False),
dict(testcase_name='float32',
input_data=np.array([[1], [1], [2], [2]], np.float32),
output_data=np.array([[-1.0], [-1.0], [1.0], [1.0]], np.float32),
elementwise=False),
dict(testcase_name='float64',
input_data=np.array([[1], [1], [2], [2]], np.float64),
output_data=np.array([[-1.0], [-1.0], [1.0], [1.0]], np.float64),
elementwise=False),
dict(testcase_name='vector',
input_data=np.array([[1, 2], [3, 4]], np.float32),
output_data=np.array([[-3, -1], [1, 3]] / np.sqrt(5.0), np.float32),
elementwise=False),
dict(testcase_name='vector_elementwise',
input_data=np.array([[1, 2], [3, 4]], np.float32),
output_data=np.array([[-1.0, -1.0], [1.0, 1.0]], np.float32),
elementwise=True),
dict(testcase_name='zero_variance',
input_data=np.array([[3], [3], [3], [3]], np.float32),
output_data=np.array([[0], [0], [0], [0]], np.float32),
elementwise=False),
dict(testcase_name='zero_variance_elementwise',
input_data=np.array([[3, 4], [3, 4]], np.float32),
output_data=np.array([[0, 0], [0, 0]], np.float32),
elementwise=True),
]
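# The expected values above follow the usual z-score definition (x - mean) / std:
# e.g. for the input [1, 1, 2, 2], mean = 1.5 and the (population) std = 0.5,
# so the scaled output is [-1, -1, 1, 1]; the zero-variance cases map to 0.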
def _construct_test_bucketization_parameters():
args_without_dtype = (
(range(1, 10), [4, 7], False, None, False, False),
(range(1, 100), [26, 51, 76], False, None, False, False),
# The following is similar to range(1, 100) test above, except that
# only odd numbers are in the input; so boundaries differ (26 -> 27 and
# 76 -> 77).
(range(1, 100, 2), [27, 51, 77], False, None, False, False),
# Test some inversely sorted inputs, and with different strides, and
# boundaries/buckets.
(range(9, 0, -1), [4, 7], False, None, False, False),
(range(19, 0, -1), [11], False, None, False, False),
(range(99, 0, -1), [51], False, None, False, False),
(range(99, 0, -1), [34, 67], False, None, False, False),
(range(99, 0, -2), [34, 68], False, None, False, False),
(range(99, 0, -1), range(11, 100, 10), False, None, False, False),
# These tests do a random shuffle of the inputs, which must not affect the
# boundaries (or the computed buckets).
(range(99, 0, -1), range(11, 100, 10), True, None, False, False),
(range(1, 100), range(11, 100, 10), True, None, False, False),
# The following test is with multiple batches (3 batches with default
# batch of 1000).
(range(1, 3000), [1503], False, None, False, False),
(range(1, 3000), [1001, 2001], False, None, False, False),
# Test with specific error for bucket boundaries. This is same as the test
# above with 3 batches and a single boundary, but with a stricter error
# tolerance (0.001) than the default error (0.01). The result is that the
# computed boundary in the test below is closer to the middle (1501) than
# that computed by the boundary of 1503 above.
(range(1, 3000), [1501], False, 0.001, False, False),
# Test with specific error for bucket boundaries, with more relaxed error
# tolerance (0.1) than the default (0.01). Now the boundary diverges
# further to 1519 (compared to boundary of 1501 with error 0.001, and
# boundary of 1503 with error 0.01).
(range(1, 3000), [1519], False, 0.1, False, False),
# Tests for tft.apply_buckets.
(range(1, 100), [26, 51, 76], False, 0.00001, True, False),
# TODO(b/78569039): Enable this test.
# (range(1, 100), [26, 51, 76], False, 0.00001, True, True),
)
dtypes = (tf.int32, tf.int64, tf.float16, tf.float32, tf.float64, tf.double)
return (x + (dtype,) for x in args_without_dtype for dtype in dtypes)
def _canonical_dtype(dtype):
"""Returns int64 for int dtypes and float32 for float dtypes."""
if dtype.is_floating:
return tf.float32
elif dtype.is_integer:
return tf.int64
else:
raise ValueError('Bad dtype {}'.format(dtype))
def sum_output_dtype(input_dtype):
"""Returns the output dtype for tft.sum."""
return input_dtype if input_dtype.is_floating else tf.int64
def _mean_output_dtype(input_dtype):
"""Returns the output dtype for tft.mean (and similar functions)."""
return tf.float64 if input_dtype == tf.float64 else tf.float32
class BeamImplTest(tft_unit.TransformTestCase):
def setUp(self):
tf.compat.v1.logging.info('Starting test case: %s', self._testMethodName)
self._context = beam_impl.Context(use_deep_copy_optimization=True)
self._context.__enter__()
def tearDown(self):
self._context.__exit__()
def testApplySavedModelSingleInput(self):
def save_model_with_single_input(instance, export_dir):
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_dir)
with instance.test_session(graph=tf.Graph()) as sess:
input1 = tf.compat.v1.placeholder(
dtype=tf.int64, shape=[3], name='myinput1')
initializer = tf.compat.v1.constant_initializer([1, 2, 3])
with tf.compat.v1.variable_scope(
'Model', reuse=None, initializer=initializer):
v1 = tf.compat.v1.get_variable('v1', [3], dtype=tf.int64)
output1 = tf.add(v1, input1, name='myadd1')
inputs = {'single_input': input1}
outputs = {'single_output': output1}
signature_def_map = {
'serving_default':
tf.compat.v1.saved_model.signature_def_utils
.predict_signature_def(inputs, outputs)
}
sess.run(tf.compat.v1.global_variables_initializer())
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.SERVING], signature_def_map=signature_def_map)
builder.save(False)
export_dir = os.path.join(self.get_temp_dir(), 'saved_model_single')
def preprocessing_fn(inputs):
x = inputs['x']
output_col = tft.apply_saved_model(
export_dir, x, tags=[tf.saved_model.SERVING])
return {'out': output_col}
save_model_with_single_input(self, export_dir)
input_data = [
{'x': [1, 2, 3]},
]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([3], tf.int64),
})
# [1, 2, 3] + [1, 2, 3] = [2, 4, 6]
expected_data = [
{'out': [2, 4, 6]}
]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'out': tf.io.FixedLenFeature([3], tf.int64)})
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_data,
expected_metadata)
def testApplySavedModelWithHashTable(self):
def save_model_with_hash_table(instance, export_dir):
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_dir)
with instance.test_session(graph=tf.Graph()) as sess:
key = tf.constant('test_key', shape=[1])
value = tf.constant('test_value', shape=[1])
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(key, value), '__MISSING__')
input1 = tf.compat.v1.placeholder(
dtype=tf.string, shape=[1], name='myinput')
output1 = tf.reshape(table.lookup(input1), shape=[1])
inputs = {'input': input1}
outputs = {'output': output1}
signature_def_map = {
'serving_default':
tf.compat.v1.saved_model.signature_def_utils
.predict_signature_def(inputs, outputs)
}
sess.run(table.init)
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.SERVING], signature_def_map=signature_def_map)
builder.save(False)
export_dir = os.path.join(self.get_temp_dir(), 'saved_model_hash_table')
def preprocessing_fn(inputs):
x = inputs['x']
output_col = tft.apply_saved_model(
export_dir, x, tags=[tf.saved_model.SERVING])
return {'out': output_col}
save_model_with_hash_table(self, export_dir)
input_data = [
{'x': ['test_key']}
]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([1], tf.string),
})
expected_data = [
{'out': b'test_value'}
]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'out': tf.io.FixedLenFeature([], tf.string)})
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_data,
expected_metadata)
def testApplySavedModelMultiInputs(self):
def save_model_with_multi_inputs(instance, export_dir):
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_dir)
with instance.test_session(graph=tf.Graph()) as sess:
input1 = tf.compat.v1.placeholder(
dtype=tf.int64, shape=[3], name='myinput1')
input2 = tf.compat.v1.placeholder(
dtype=tf.int64, shape=[3], name='myinput2')
input3 = tf.compat.v1.placeholder(
dtype=tf.int64, shape=[3], name='myinput3')
initializer = tf.compat.v1.constant_initializer([1, 2, 3])
with tf.compat.v1.variable_scope(
'Model', reuse=None, initializer=initializer):
v1 = tf.compat.v1.get_variable('v1', [3], dtype=tf.int64)
o1 = tf.add(v1, input1, name='myadd1')
o2 = tf.subtract(o1, input2, name='mysubtract1')
output1 = tf.add(o2, input3, name='myadd2')
inputs = {'name1': input1, 'name2': input2,
'name3': input3}
outputs = {'single_output': output1}
signature_def_map = {
'serving_default':
tf.compat.v1.saved_model.signature_def_utils
.predict_signature_def(inputs, outputs)
}
sess.run(tf.compat.v1.global_variables_initializer())
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.SERVING], signature_def_map=signature_def_map)
builder.save(False)
export_dir = os.path.join(self.get_temp_dir(), 'saved_model_multi')
def preprocessing_fn(inputs):
x = inputs['x']
y = inputs['y']
z = inputs['z']
sum_column = tft.apply_saved_model(
export_dir, {
'name1': x,
'name3': z,
'name2': y
},
tags=[tf.saved_model.SERVING])
return {'sum': sum_column}
save_model_with_multi_inputs(self, export_dir)
input_data = [
{'x': [1, 2, 3], 'y': [2, 3, 4], 'z': [1, 1, 1]},
]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([3], tf.int64),
'y': tf.io.FixedLenFeature([3], tf.int64),
'z': tf.io.FixedLenFeature([3], tf.int64),
})
# [1, 2, 3] + [1, 2, 3] - [2, 3, 4] + [1, 1, 1] = [1, 2, 3]
expected_data = [
{'sum': [1, 2, 3]}
]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'sum': tf.io.FixedLenFeature([3], tf.int64)})
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_data,
expected_metadata)
def testApplyFunctionWithCheckpoint(self):
def tensor_fn(input1, input2):
initializer = tf.compat.v1.constant_initializer([1, 2, 3])
with tf.compat.v1.variable_scope(
'Model', reuse=None, initializer=initializer):
v1 = tf.compat.v1.get_variable('v1', [3], dtype=tf.int64)
v2 = tf.compat.v1.get_variable('v2', [3], dtype=tf.int64)
o1 = tf.add(v1, v2, name='add1')
o2 = tf.subtract(o1, input1, name='sub1')
o3 = tf.subtract(o2, input2, name='sub2')
return o3
def save_checkpoint(instance, checkpoint_path):
with instance.test_session(graph=tf.Graph()) as sess:
input1 = tf.compat.v1.placeholder(
dtype=tf.int64, shape=[3], name='myinput1')
input2 = tf.compat.v1.placeholder(
dtype=tf.int64, shape=[3], name='myinput2')
tensor_fn(input1, input2)
saver = tf.compat.v1.train.Saver()
sess.run(tf.compat.v1.global_variables_initializer())
saver.save(sess, checkpoint_path)
checkpoint_path = os.path.join(self.get_temp_dir(), 'chk')
def preprocessing_fn(inputs):
x = inputs['x']
y = inputs['y']
out_value = tft.apply_function_with_checkpoint(
tensor_fn, [x, y], checkpoint_path)
return {'out': out_value}
save_checkpoint(self, checkpoint_path)
input_data = [
{'x': [2, 2, 2], 'y': [-1, -3, 1]},
]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([3], tf.int64),
'y': tf.io.FixedLenFeature([3], tf.int64),
})
# [1, 2, 3] + [1, 2, 3] - [2, 2, 2] - [-1, -3, 1] = [1, 5, 3]
expected_data = [
{'out': [1, 5, 3]}
]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'out': tf.io.FixedLenFeature([3], tf.int64)})
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_data,
expected_metadata)
@tft_unit.named_parameters(('NoDeepCopy', False), ('WithDeepCopy', True))
def testMultipleLevelsOfAnalyzers(self, with_deep_copy):
# Test a preprocessing function similar to scale_to_0_1 except that it
# involves multiple interleavings of analyzers and transforms.
def preprocessing_fn(inputs):
scaled_to_0 = inputs['x'] - tft.min(inputs['x'])
scaled_to_0_1 = scaled_to_0 / tft.max(scaled_to_0)
return {'x_scaled': scaled_to_0_1}
input_data = [{'x': 4}, {'x': 1}, {'x': 5}, {'x': 2}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'x': tf.io.FixedLenFeature([], tf.float32)})
expected_data = [
{'x_scaled': 0.75},
{'x_scaled': 0.0},
{'x_scaled': 1.0},
{'x_scaled': 0.25}
]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'x_scaled': tf.io.FixedLenFeature([], tf.float32)})
with beam_impl.Context(use_deep_copy_optimization=with_deep_copy):
# NOTE: In order to correctly test deep_copy here, we can't pass test_data
# to assertAnalyzeAndTransformResults.
# Not passing test_data to assertAnalyzeAndTransformResults means that
# tft.AnalyzeAndTransform is called, exercising the right code path.
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_data,
expected_metadata)
def testRawFeedDictInput(self):
# Test the ability to feed raw data into AnalyzeDataset and TransformDataset
# by using subclasses of these transforms which create batches of size 1.
def preprocessing_fn(inputs):
sequence_example = inputs['sequence_example']
# Ordinarily this would have shape (batch_size,) since 'sequence_example'
# was defined as a FixedLenFeature with shape (). But since we specified
# desired_batch_size, we can assume that the shape is (1,), and reshape
# to ().
sequence_example = tf.reshape(sequence_example, ())
# Parse the sequence example.
feature_spec = {
'x':
tf.io.FixedLenSequenceFeature(
shape=[], dtype=tf.string, default_value=None)
}
_, sequences = tf.io.parse_single_sequence_example(
sequence_example, sequence_features=feature_spec)
# Create a batch based on the sequence "x".
return {'x': sequences['x']}
def text_sequence_example_to_binary(text_proto):
proto = text_format.Merge(text_proto, example_pb2.SequenceExample())
return proto.SerializeToString()
sequence_examples = [
"""
feature_lists: {
feature_list: {
key: "x"
value: {
feature: {bytes_list: {value: 'ab'}}
feature: {bytes_list: {value: ''}}
feature: {bytes_list: {value: 'c'}}
feature: {bytes_list: {value: 'd'}}
}
}
}
""",
"""
feature_lists: {
feature_list: {
key: "x"
value: {
feature: {bytes_list: {value: 'ef'}}
feature: {bytes_list: {value: 'g'}}
}
}
}
"""
]
input_data = [
{'sequence_example': text_sequence_example_to_binary(sequence_example)}
for sequence_example in sequence_examples]
input_metadata = tft_unit.metadata_from_feature_spec(
{'sequence_example': tf.io.FixedLenFeature([], tf.string)})
expected_data = [
{'x': b'ab'},
{'x': b''},
{'x': b'c'},
{'x': b'd'},
{'x': b'ef'},
{'x': b'g'}
]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'x': tf.io.FixedLenFeature([], tf.string)})
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_data,
expected_metadata, desired_batch_size=1)
def testTransformWithExcludedOutputs(self):
def preprocessing_fn(inputs):
return {
'x_scaled': tft.scale_to_0_1(inputs['x']),
'y_scaled': tft.scale_to_0_1(inputs['y'])
}
# Run AnalyzeAndTransform on some input data and compare with expected
# output.
input_data = [{'x': 5, 'y': 1}, {'x': 1, 'y': 2}]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([], tf.float32),
'y': tf.io.FixedLenFeature([], tf.float32)
})
with beam_impl.Context(temp_dir=self.get_temp_dir()):
transform_fn = (
(input_data, input_metadata) | beam_impl.AnalyzeDataset(
preprocessing_fn))
# Take the transform function and use TransformDataset to apply it to
# some eval data, with missing 'y' column.
eval_data = [{'x': 6}]
eval_metadata = tft_unit.metadata_from_feature_spec(
{'x': tf.io.FixedLenFeature([], tf.float32)})
transformed_eval_data, transformed_eval_metadata = (
((eval_data, eval_metadata), transform_fn)
| beam_impl.TransformDataset(exclude_outputs=['y_scaled']))
expected_transformed_eval_data = [{'x_scaled': 1.25}]
expected_transformed_eval_metadata = tft_unit.metadata_from_feature_spec(
{'x_scaled': tf.io.FixedLenFeature([], tf.float32)})
self.assertDataCloseOrEqual(transformed_eval_data,
expected_transformed_eval_data)
self.assertEqual(transformed_eval_metadata.dataset_metadata,
expected_transformed_eval_metadata)
def testMapWithCond(self):
def preprocessing_fn(inputs):
return {
'a':
tf.cond(
pred=tf.constant(True),
true_fn=lambda: inputs['a'],
false_fn=lambda: inputs['b'])
}
input_data = [
{'a': 4, 'b': 3},
{'a': 1, 'b': 2},
{'a': 5, 'b': 6},
{'a': 2, 'b': 3}
]
input_metadata = tft_unit.metadata_from_feature_spec({
'a': tf.io.FixedLenFeature([], tf.float32),
'b': tf.io.FixedLenFeature([], tf.float32)
})
expected_data = [
{'a': 4},
{'a': 1},
{'a': 5},
{'a': 2}
]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([], tf.float32)})
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_data,
expected_metadata)
def testPyFuncs(self):
def my_multiply(x, y):
return x*y
def my_add(x, y):
return x+y
def preprocessing_fn(inputs):
result = {
'a+b': tft.apply_pyfunc(
my_add, tf.float32, True, 'add', inputs['a'], inputs['b']),
'a+c': tft.apply_pyfunc(
my_add, tf.float32, True, 'add', inputs['a'], inputs['c']),
'ab': tft.apply_pyfunc(
my_multiply, tf.float32, False, 'multiply',
inputs['a'], inputs['b']),
'sum_scaled': tft.scale_to_0_1(
tft.apply_pyfunc(
my_add, tf.float32, True, 'add', inputs['a'], inputs['c']))
}
for value in result.values():
value.set_shape([1,])
return result
input_data = [
{'a': 4, 'b': 3, 'c': 2},
{'a': 1, 'b': 2, 'c': 3},
{'a': 5, 'b': 6, 'c': 7},
{'a': 2, 'b': 3, 'c': 4}
]
input_metadata = tft_unit.metadata_from_feature_spec({
'a': tf.io.FixedLenFeature([], tf.float32),
'b': tf.io.FixedLenFeature([], tf.float32),
'c': tf.io.FixedLenFeature([], tf.float32)
})
expected_data = [
{'ab': 12, 'a+b': 7, 'a+c': 6, 'sum_scaled': 0.25},
{'ab': 2, 'a+b': 3, 'a+c': 4, 'sum_scaled': 0},
{'ab': 30, 'a+b': 11, 'a+c': 12, 'sum_scaled': 1},
{'ab': 6, 'a+b': 5, 'a+c': 6, 'sum_scaled': 0.25}
]
# When calling tf.py_func, the output shape is set to unknown.
expected_metadata = tft_unit.metadata_from_feature_spec({
'ab': tf.io.FixedLenFeature([], tf.float32),
'a+b': tf.io.FixedLenFeature([], tf.float32),
'a+c': tf.io.FixedLenFeature([], tf.float32),
'sum_scaled': tf.io.FixedLenFeature([], tf.float32)
})
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_data,
expected_metadata)
def testWithMoreThanDesiredBatchSize(self):
def preprocessing_fn(inputs):
return {
'ab': tf.multiply(inputs['a'], inputs['b']),
'i': tft.compute_and_apply_vocabulary(inputs['c'])
}
batch_size = 100
num_instances = batch_size + 1
input_data = [{
'a': 2,
'b': i,
'c': '%.10i' % i, # Front-padded to facilitate lexicographic sorting.
} for i in range(num_instances)]
input_metadata = tft_unit.metadata_from_feature_spec({
'a': tf.io.FixedLenFeature([], tf.float32),
'b': tf.io.FixedLenFeature([], tf.float32),
'c': tf.io.FixedLenFeature([], tf.string)
})
expected_data = [{
'ab': 2*i,
'i': (len(input_data) - 1) - i, # Due to reverse lexicographic sorting.
} for i in range(len(input_data))]
expected_metadata = tft_unit.metadata_from_feature_spec({
'ab': tf.io.FixedLenFeature([], tf.float32),
'i': tf.io.FixedLenFeature([], tf.int64),
}, {
'i':
schema_pb2.IntDomain(
min=-1, max=num_instances - 1, is_categorical=True)
})
self.assertAnalyzeAndTransformResults(
input_data,
input_metadata,
preprocessing_fn,
expected_data,
expected_metadata,
desired_batch_size=batch_size)
def testWithUnicode(self):
def preprocessing_fn(inputs):
return {'a b': tf.strings.join([inputs['a'], inputs['b']], separator=' ')}
input_data = [{'a': 'Hello', 'b': 'world'}, {'a': 'Hello', 'b': u'κόσμε'}]
input_metadata = tft_unit.metadata_from_feature_spec({
'a': tf.io.FixedLenFeature([], tf.string),
'b': tf.io.FixedLenFeature([], tf.string),
})
expected_data = [
{'a b': b'Hello world'},
{'a b': u'Hello κόσμε'.encode('utf-8')}
]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'a b': tf.io.FixedLenFeature([], tf.string)})
self.assertAnalyzeAndTransformResults(
input_data, input_metadata, preprocessing_fn, expected_data,
expected_metadata)
@tft_unit.parameters((True,), (False,))
def testScaleUnitInterval(self, elementwise):
def preprocessing_fn(inputs):
outputs = {}
cols = ('x', 'y')
for col, scaled_t in zip(
cols,
tf.unstack(
tft.scale_to_0_1(
tf.stack([inputs[col] for col in cols], axis=1),
elementwise=elementwise),
axis=1)):
outputs[col + '_scaled'] = scaled_t
return outputs
input_data = [{
'x': 4,
'y': 5
}, {
'x': 1,
'y': 2
}, {
'x': 5,
'y': 6
}, {
'x': 2,
'y': 3
}]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([], tf.float32),
'y': tf.io.FixedLenFeature([], tf.float32)
})
if elementwise:
expected_data = [{
'x_scaled': 0.75,
'y_scaled': 0.75
}, {
'x_scaled': 0.0,
'y_scaled': 0.0
}, {
'x_scaled': 1.0,
'y_scaled': 1.0
}, {
'x_scaled': 0.25,
'y_scaled': 0.25
}]
else:
expected_data = [{
'x_scaled': 0.6,
'y_scaled': 0.8
}, {
'x_scaled': 0.0,
'y_scaled': 0.2
}, {
'x_scaled': 0.8,
'y_scaled': 1.0
}, {
'x_scaled': 0.2,
'y_scaled': 0.4
}]
expected_metadata = tft_unit.metadata_from_feature_spec({
'x_scaled': tf.io.FixedLenFeature([], tf.float32),
'y_scaled': tf.io.FixedLenFeature([], tf.float32)
})
self.assertAnalyzeAndTransformResults(input_data, input_metadata,
preprocessing_fn, expected_data,
expected_metadata)
@tft_unit.parameters((False,))
def testScaleUnitIntervalPerKey(self, elementwise):
def preprocessing_fn(inputs):
outputs = {}
cols = ('x', 'y')
for col, scaled_t in zip(
cols,
tf.unstack(
tft.scale_to_0_1_per_key(
tf.stack([inputs[col] for col in cols], axis=1),
inputs['key'],
elementwise=False),
axis=1)):
outputs[col + '_scaled'] = scaled_t
return outputs
input_data = [{
'x': 4,
'y': 5,
'key': 'a'
}, {
'x': 1,
'y': 2,
'key': 'a'
}, {
'x': 5,
'y': 6,
'key': 'a'
}, {
'x': 2,
'y': 3,
'key': 'a'
}, {
'x': 25,
'y': -25,
'key': 'b'
}, {
'x': 5,
'y': 0,
'key': 'b'
}]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([], tf.float32),
'y': tf.io.FixedLenFeature([], tf.float32),
'key': tf.io.FixedLenFeature([], tf.string)
})
expected_data = [{
'x_scaled': 0.6,
'y_scaled': 0.8
}, {
'x_scaled': 0.0,
'y_scaled': 0.2
}, {
'x_scaled': 0.8,
'y_scaled': 1.0
}, {
'x_scaled': 0.2,
'y_scaled': 0.4
}, {
'x_scaled': 1.0,
'y_scaled': 0.0
}, {
'x_scaled': 0.6,
'y_scaled': 0.5
}]
expected_metadata = tft_unit.metadata_from_feature_spec({
'x_scaled': tf.io.FixedLenFeature([], tf.float32),
'y_scaled': tf.io.FixedLenFeature([], tf.float32)
})
self.assertAnalyzeAndTransformResults(input_data, input_metadata,
preprocessing_fn, expected_data,
expected_metadata)
@tft_unit.parameters((True,), (False,))
def testScaleMinMax(self, elementwise):
def preprocessing_fn(inputs):
outputs = {}
cols = ('x', 'y')
for col, scaled_t in zip(
cols,
tf.unstack(
tft.scale_by_min_max(
tf.stack([inputs[col] for col in cols], axis=1),
output_min=-1,
output_max=1,
elementwise=elementwise),
axis=1)):
outputs[col + '_scaled'] = scaled_t
return outputs
input_data = [{
'x': 4,
'y': 8
}, {
'x': 1,
'y': 5
}, {
'x': 5,
'y': 9
}, {
'x': 2,
'y': 6
}]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([], tf.float32),
'y': tf.io.FixedLenFeature([], tf.float32)
})
if elementwise:
expected_data = [{
'x_scaled': 0.5,
'y_scaled': 0.5
}, {
'x_scaled': -1.0,
'y_scaled': -1.0
}, {
'x_scaled': 1.0,
'y_scaled': 1.0
}, {
'x_scaled': -0.5,
'y_scaled': -0.5
}]
else:
expected_data = [{
'x_scaled': -0.25,
'y_scaled': 0.75
}, {
'x_scaled': -1.0,
'y_scaled': 0.0
}, {
'x_scaled': 0.0,
'y_scaled': 1.0
}, {
'x_scaled': -0.75,
'y_scaled': 0.25
}]
expected_metadata = tft_unit.metadata_from_feature_spec({
'x_scaled': tf.io.FixedLenFeature([], tf.float32),
'y_scaled': tf.io.FixedLenFeature([], tf.float32)
})
self.assertAnalyzeAndTransformResults(input_data, input_metadata,
preprocessing_fn, expected_data,
expected_metadata)
@tft_unit.parameters((False,))
def testScaleMinMaxPerKey(self, elementwise=False):
def preprocessing_fn(inputs):
outputs = {}
cols = ('x', 'y')
for col, scaled_t in zip(
cols,
tf.unstack(
tft.scale_by_min_max_per_key(
tf.stack([inputs[col] for col in cols], axis=1),
inputs['key'],
output_min=-1,
output_max=1,
elementwise=False),
axis=1)):
outputs[col + '_scaled'] = scaled_t
return outputs
input_data = [{
'x': 4,
'y': 8,
'key': 'a'
}, {
'x': 1,
'y': 5,
'key': 'a'
}, {
'x': 5,
'y': 9,
'key': 'a'
}, {
'x': 2,
'y': 6,
'key': 'a'
}, {
'x': -2,
'y': 0,
'key': 'b'
}, {
'x': 0,
'y': 2,
'key': 'b'
}]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([], tf.float32),
'y': tf.io.FixedLenFeature([], tf.float32),
'key': tf.io.FixedLenFeature([], tf.string)
})
expected_data = [{
'x_scaled': -0.25,
'y_scaled': 0.75
}, {
'x_scaled': -1.0,
'y_scaled': 0.0
}, {
'x_scaled': 0.0,
'y_scaled': 1.0
}, {
'x_scaled': -0.75,
'y_scaled': 0.25
}, {
'x_scaled': -1.0,
'y_scaled': 0.0
}, {
'x_scaled': 0.0,
'y_scaled': 1.0
}]
expected_metadata = tft_unit.metadata_from_feature_spec({
'x_scaled': tf.io.FixedLenFeature([], tf.float32),
'y_scaled': tf.io.FixedLenFeature([], tf.float32)
})
self.assertAnalyzeAndTransformResults(input_data, input_metadata,
preprocessing_fn, expected_data,
expected_metadata)
def testScaleMinMaxConstant(self):
def preprocessing_fn(inputs):
return {'x_scaled': tft.scale_by_min_max(inputs['x'], 0, 10)}
input_data = [{'x': 4}, {'x': 4}, {'x': 4}, {'x': 4}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'x': tf.io.FixedLenFeature([], tf.float32)})
expected_data = [{
'x_scaled': 5
}, {
'x_scaled': 5
}, {
'x_scaled': 5
}, {
'x_scaled': 5
}]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'x_scaled': tf.io.FixedLenFeature([], tf.float32)})
self.assertAnalyzeAndTransformResults(input_data, input_metadata,
preprocessing_fn, expected_data,
expected_metadata)
def testScaleMinMaxConstantElementwise(self):
def preprocessing_fn(inputs):
outputs = {}
cols = ('x', 'y')
for col, scaled_t in zip(
cols,
tf.unstack(
tft.scale_by_min_max(
tf.stack([inputs[col] for col in cols], axis=1),
output_min=0,
output_max=10,
elementwise=True),
axis=1)):
outputs[col + '_scaled'] = scaled_t
return outputs
input_data = [{
'x': 4,
'y': 1
}, {
'x': 4,
'y': 1
}, {
'x': 4,
'y': 2
}, {
'x': 4,
'y': 2
}]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([], tf.float32),
'y': tf.io.FixedLenFeature([], tf.float32)
})
expected_data = [{
'x_scaled': 5,
'y_scaled': 0
}, {
'x_scaled': 5,
'y_scaled': 0
}, {
'x_scaled': 5,
'y_scaled': 10
}, {
'x_scaled': 5,
'y_scaled': 10
}]
expected_metadata = tft_unit.metadata_from_feature_spec({
'x_scaled': tf.io.FixedLenFeature([], tf.float32),
'y_scaled': tf.io.FixedLenFeature([], tf.float32)
})
self.assertAnalyzeAndTransformResults(input_data, input_metadata,
preprocessing_fn, expected_data,
expected_metadata)
def testScaleMinMaxError(self):
def preprocessing_fn(inputs):
return {'x_scaled': tft.scale_by_min_max(inputs['x'], 2, 1)}
input_data = [{'x': 1}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'x': tf.io.FixedLenFeature([], tf.float32)})
expected_data = [{'x_scaled': float('nan')}]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'x_scaled': tf.io.FixedLenFeature([], tf.float32)})
with self.assertRaises(ValueError) as context:
self.assertAnalyzeAndTransformResults(input_data, input_metadata,
preprocessing_fn, expected_data,
expected_metadata)
self.assertTrue(
'output_min must be less than output_max' in str(context.exception))
@tft_unit.named_parameters(_SCALE_TO_Z_SCORE_TEST_CASES)
def testScaleToZScore(self, input_data, output_data, elementwise):
def preprocessing_fn(inputs):
x = inputs['x']
x_cast = tf.cast(x, tf.as_dtype(input_data.dtype))
x_scaled = tft.scale_to_z_score(x_cast, elementwise=elementwise)
self.assertEqual(x_scaled.dtype, tf.as_dtype(output_data.dtype))
return {'x_scaled': tf.cast(x_scaled, tf.float32)}
input_data_dicts = [{'x': x} for x in input_data]
expected_data_dicts = [{'x_scaled': x_scaled} for x_scaled in output_data]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature(
input_data.shape[1:],
_canonical_dtype(tf.as_dtype(input_data.dtype))),
})
expected_metadata = tft_unit.metadata_from_feature_spec({
'x_scaled': tf.io.FixedLenFeature(output_data.shape[1:], tf.float32),
})
self.assertAnalyzeAndTransformResults(input_data_dicts, input_metadata,
preprocessing_fn, expected_data_dicts,
expected_metadata)
@tft_unit.parameters(*itertools.product([
tf.int16,
tf.int32,
tf.int64,
tf.float32,
tf.float64,
], (True, False)))
def testScaleToZScoreSparse(self, input_dtype, elementwise):
def preprocessing_fn(inputs):
z_score = tf.sparse.to_dense(
tft.scale_to_z_score(
tf.cast(inputs['x'], input_dtype), elementwise=elementwise),
default_value=np.nan)
z_score.set_shape([None, 4])
self.assertEqual(z_score.dtype, _mean_output_dtype(input_dtype))
return {
'x_scaled': tf.cast(z_score, tf.float32)
}
input_data = [
{'idx': [0, 1], 'val': [-4, 10]},
{'idx': [0, 1], 'val': [2, 4]},
]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.SparseFeature('idx', 'val', _canonical_dtype(input_dtype), 4)
})
if elementwise:
# Mean(x) = [-1, 7]
# Var(x) = [9, 9]
# StdDev(x) = [3, 3]
expected_data = [
{
'x_scaled': [-1., 1.,
float('nan'),
float('nan')] # [(-4 +1 ) / 3, (10 -7) / 3]
},
{
'x_scaled': [1., -1.,
float('nan'),
float('nan')] # [(2 + 1) / 3, (4 - 7) / 3]
}
]
else:
# Mean = 3
# Var = 25
# Std Dev = 5
expected_data = [
{
'x_scaled': [-1.4, 1.4, float('nan'),
float('nan')] # [(-4 - 3) / 5, (10 - 3) / 5]
},
{
'x_scaled': [-.2, .2, float('nan'),
float('nan')] # [(2 - 3) / 5, (4 - 3) / 5]
}
]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'x_scaled': tf.io.FixedLenFeature([4], tf.float32)})
self.assertAnalyzeAndTransformResults(input_data, input_metadata,
preprocessing_fn, expected_data,
expected_metadata)
@tft_unit.parameters(*itertools.product([
tf.int16,
tf.int32,
tf.int64,
tf.float32,
tf.float64,
]))
def testScaleToZScorePerSparseKey(self, input_dtype):
# TODO(b/131852830) Add elementwise tests.
def preprocessing_fn(inputs):
def scale_to_z_score_per_key(tensor, key):
z_score = tft.scale_to_z_score_per_key(
tf.cast(tensor, input_dtype), key=key, elementwise=False)
self.assertEqual(z_score.dtype, _mean_output_dtype(input_dtype))
return tf.cast(z_score, tf.float32)
return {
'x_scaled': scale_to_z_score_per_key(inputs['x'], inputs['key']),
'y_scaled': scale_to_z_score_per_key(inputs['y'], inputs['key']),
}
input_data = [{
'x': [-4., 2.],
'y': [0., 0],
'key': ['a', 'a'],
}, {
'x': [10., 4.],
'y': [0., 0],
'key': ['a', 'a'],
}, {
'x': [1., -1.],
'y': [0., 0],
'key': ['b', 'b'],
}]
    # 'a':
    # Mean(x) = 3, Mean(y) = 0
# Var(x) = (-7^2 + -1^2 + 7^2 + 1^2) / 4 = 25, Var(y) = 0
# StdDev(x) = 5, StdDev(y) = 0
# 'b':
# Mean(x) = 0, Mean(y) = 0
# Var(x) = 1, Var(y) = 0
# StdDev(x) = 1, StdDev(y) = 0
expected_data = [
{
'x_scaled': [-1.4, -.2], # [(-4 - 3) / 5, (2 - 3) / 5]
'y_scaled': [0., 0.],
},
{
'x_scaled': [1.4, .2], # [(10 - 3) / 5, (4 - 3) / 5]
'y_scaled': [0., 0.],
},
{
'x_scaled': [1., -1.], # [(1 - 0) / 1, (-1 - 0) / 1]
'y_scaled': [0., 0.],
}
]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.VarLenFeature(_canonical_dtype(input_dtype)),
'y': tf.io.VarLenFeature(_canonical_dtype(input_dtype)),
'key': tf.io.VarLenFeature(tf.string),
})
expected_metadata = tft_unit.metadata_from_feature_spec({
'x_scaled': tf.io.VarLenFeature(tf.float32),
'y_scaled': tf.io.VarLenFeature(tf.float32),
})
self.assertAnalyzeAndTransformResults(input_data, input_metadata,
preprocessing_fn, expected_data,
expected_metadata)
@tft_unit.parameters(*itertools.product([
tf.int16,
tf.int32,
tf.int64,
tf.float32,
tf.float64,
]))
def testScaleToZScorePerKey(self, input_dtype):
# TODO(b/131852830) Add elementwise tests.
def preprocessing_fn(inputs):
def scale_to_z_score_per_key(tensor, key):
z_score = tft.scale_to_z_score_per_key(
tf.cast(tensor, input_dtype), key=key, elementwise=False)
self.assertEqual(z_score.dtype, _mean_output_dtype(input_dtype))
return tf.cast(z_score, tf.float32)
return {
'x_scaled': scale_to_z_score_per_key(inputs['x'], inputs['key']),
'y_scaled': scale_to_z_score_per_key(inputs['y'], inputs['key']),
's_scaled': scale_to_z_score_per_key(inputs['s'], inputs['key']),
}
input_data = [{
'x': [-4.],
'y': [0.],
's': 3.,
'key': 'a',
}, {
'x': [10.],
'y': [0.],
's': -3.,
'key': 'a',
}, {
'x': [1.],
'y': [0.],
's': 3.,
'key': 'b',
}, {
'x': [2.],
'y': [0.],
's': 3.,
'key': 'a',
}, {
'x': [4.],
'y': [0.],
's': -3.,
'key': 'a',
}, {
'x': [-1.],
'y': [0.],
's': -3.,
'key': 'b',
}]
# 'a':
# Mean(x) = 3, Mean(y) = 0
# Var(x) = (-7^2 + -1^2 + 7^2 + 1^2) / 4 = 25, Var(y) = 0
# StdDev(x) = 5, StdDev(y) = 0
# 'b':
# Mean(x) = 0, Mean(y) = 0
# Var(x) = 1, Var(y) = 0
# StdDev(x) = 1, StdDev(y) = 0
expected_data = [
{
'x_scaled': [-1.4], # [(-4 - 3) / 5, (2 - 3) / 5]
'y_scaled': [0.],
's_scaled': 1.,
},
{
'x_scaled': [1.4], # [(10 - 3) / 5, (4 - 3) / 5]
'y_scaled': [0.],
's_scaled': -1.,
},
{
'x_scaled': [1.], # [(1 - 0) / 1, (-1 - 0) / 1]
'y_scaled': [0.],
's_scaled': 1.,
},
{
'x_scaled': [-.2], # [(-4 - 3) / 5, (2 - 3) / 5]
'y_scaled': [0.],
's_scaled': 1.,
},
{
'x_scaled': [.2], # [(10 - 3) / 5, (4 - 3) / 5]
'y_scaled': [0.],
's_scaled': -1.,
},
{
'x_scaled': [-1.], # [(1 - 0) / 1, (-1 - 0) / 1]
'y_scaled': [0.],
's_scaled': -1.,
}
]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([1], _canonical_dtype(input_dtype)),
'y': tf.io.FixedLenFeature([1], _canonical_dtype(input_dtype)),
's': tf.io.FixedLenFeature([], _canonical_dtype(input_dtype)),
'key': tf.io.FixedLenFeature([], tf.string),
})
expected_metadata = tft_unit.metadata_from_feature_spec({
'x_scaled': tf.io.FixedLenFeature([1], tf.float32),
'y_scaled': tf.io.FixedLenFeature([1], tf.float32),
's_scaled': tf.io.FixedLenFeature([], tf.float32),
})
self.assertAnalyzeAndTransformResults(input_data, input_metadata,
preprocessing_fn, expected_data,
expected_metadata)
@tft_unit.parameters(*itertools.product([
tf.int16,
tf.int32,
tf.int64,
tf.float32,
tf.float64,
]))
def testScaleToZScoreSparsePerKey(self, input_dtype):
# TODO(b/131852830) Add elementwise tests.
def preprocessing_fn(inputs):
z_score = tf.sparse.to_dense(
tft.scale_to_z_score_per_key(
tf.cast(inputs['x'], input_dtype),
inputs['key'],
elementwise=False),
default_value=np.nan)
z_score.set_shape([None, 4])
self.assertEqual(z_score.dtype, _mean_output_dtype(input_dtype))
return {
'x_scaled': tf.cast(z_score, tf.float32)
}
input_data = [
{'idx': [0, 1], 'val': [-4, 10], 'key_idx': [0, 1], 'key': ['a', 'a']},
{'idx': [0, 1], 'val': [2, 1], 'key_idx': [0, 1], 'key': ['a', 'b']},
{'idx': [0, 1], 'val': [-1, 4], 'key_idx': [0, 1], 'key': ['b', 'a']},
]
input_metadata = tft_unit.metadata_from_feature_spec({
'key': tf.io.SparseFeature('key_idx', 'key', tf.string, 4),
'x': tf.io.SparseFeature('idx', 'val', _canonical_dtype(input_dtype), 4)
})
# 'a':
# Mean = 3
# Var = 25
# Std Dev = 5
# 'b':
# Mean = 0
# Var = 1
# Std Dev = 1
expected_data = [
{
'x_scaled': [-1.4, 1.4, float('nan'),
float('nan')] # [(-4 - 3) / 5, (10 - 3) / 5]
},
{
'x_scaled': [-.2, 1., float('nan'),
float('nan')] # [(2 - 3) / 5, (1 - 0) / 1]
},
{
'x_scaled': [-1., .2,
float('nan'),
float('nan')] # [(-1 - 0) / 1, (4 - 3) / 5]
}
]
expected_metadata = tft_unit.metadata_from_feature_spec(
{'x_scaled': tf.io.FixedLenFeature([4], tf.float32)})
self.assertAnalyzeAndTransformResults(input_data, input_metadata,
preprocessing_fn, expected_data,
expected_metadata)
def testMeanAndVar(self):
def analyzer_fn(inputs):
mean, var = analyzers._mean_and_var(inputs['x'])
return {
'mean': mean,
'var': var
}
# NOTE: We force 10 batches: data has 100 elements and we request a batch
# size of 10.
input_data = [{'x': [x]}
for x in range(1, 101)]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([1], tf.int64)
})
    # Expected mean and variance of the integers 1..100.
expected_outputs = {
'mean': np.float32(50.5),
'var': np.float32(833.25)
}
self.assertAnalyzerOutputs(
input_data,
input_metadata,
analyzer_fn,
expected_outputs,
desired_batch_size=10)
def testMeanAndVarPerKey(self):
def analyzer_fn(inputs):
key_vocab, mean, var = analyzers._mean_and_var_per_key(
inputs['x'], inputs['key'])
return {
'key_vocab': key_vocab,
'mean': mean,
'var': tf.round(100 * var) / 100.0
}
# NOTE: We force 10 batches: data has 100 elements and we request a batch
# size of 10.
input_data = [{'x': [x], 'key': 'a' if x < 50 else 'b'}
for x in range(1, 101)]
input_metadata = tft_unit.metadata_from_feature_spec({
'x': tf.io.FixedLenFeature([1], tf.int64),
'key': tf.io.FixedLenFeature([], tf.string)
})
    # Expected per-key mean and variance: key 'a' covers 1..49, key 'b' covers 50..100.
expected_outputs = {
'key_vocab': np.array([b'a', b'b'], np.object),
'mean': np.array([25, 75], np.float32),
'var': np.array([200, 216.67], np.float32)
}
self.assertAnalyzerOutputs(
input_data,
input_metadata,
analyzer_fn,
expected_outputs,
desired_batch_size=10)
@tft_unit.named_parameters(('Int64In', tf.int64, {
'min': tf.int64,
'max': tf.int64,
'sum': tf.int64,
'size': tf.int64,
'mean': tf.float32,
'var': tf.float32
}), ('Int32In', tf.int32, {
'min': tf.int32,
'max': tf.int32,
'sum': tf.int64,
'size': tf.int64,
'mean': tf.float32,
'var': tf.float32
}), ('Int16In', tf.int16, {
'min': tf.int16,
'max': tf.int16,
'sum': tf.int64,
'size': tf.int64,
'mean': tf.float32,
'var': tf.float32
}), ('Float64In', tf.float64, {
'min': tf.float64,
'max': tf.float64,
'sum': tf.float64,
'size': tf.int64,
'mean': tf.float64,
'var': tf.float64
}), ('Float32In', tf.float32, {
'min': tf.float32,
'max': tf.float32,
'sum': tf.float32,
'size': tf.int64,
'mean': tf.float32,
'var': tf.float32
}), ('Float16In', tf.float16, {
'min': tf.float16,
'max': tf.float16,
'sum': tf.float32,
'size': tf.int64,
'mean': tf.float16,
'var': tf.float16
}))
def testNumericAnalyzersWithScalarInputs(self, input_dtype, output_dtypes):
def analyzer_fn(inputs):
a = tf.cast(inputs['a'], input_dtype)
def assert_and_cast_dtype(tensor, out_dtype):
self.assertEqual(tensor.dtype, out_dtype)
return tf.cast(tensor, _canonical_dtype(out_dtype))
return {
'min': assert_and_cast_dtype(tft.min(a),
output_dtypes['min']),
'max': assert_and_cast_dtype(tft.max(a),
output_dtypes['max']),
'sum': assert_and_cast_dtype(tft.sum(a),
output_dtypes['sum']),
'size': assert_and_cast_dtype(tft.size(a),
output_dtypes['size']),
'mean': assert_and_cast_dtype(tft.mean(a),
output_dtypes['mean']),
'var': assert_and_cast_dtype(tft.var(a),
output_dtypes['var']),
}
input_data = [{'a': 4}, {'a': 1}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([], _canonical_dtype(input_dtype))})
expected_outputs = {
'min': np.array(
1, _canonical_dtype(output_dtypes['min']).as_numpy_dtype),
'max': np.array(
4, _canonical_dtype(output_dtypes['max']).as_numpy_dtype),
'sum': np.array(
5, _canonical_dtype(output_dtypes['sum']).as_numpy_dtype),
'size': np.array(
2, _canonical_dtype(output_dtypes['size']).as_numpy_dtype),
'mean': np.array(
2.5, _canonical_dtype(output_dtypes['mean']).as_numpy_dtype),
'var': np.array(
2.25, _canonical_dtype(output_dtypes['var']).as_numpy_dtype),
}
self.assertAnalyzerOutputs(
input_data, input_metadata, analyzer_fn, expected_outputs)
@tft_unit.parameters(*itertools.product([
tf.int16,
tf.int32,
tf.int64,
tf.float32,
tf.float64,
tf.uint8,
tf.uint16,
], (True, False)))
def testNumericAnalyzersWithSparseInputs(self, input_dtype,
reduce_instance_dims):
def analyzer_fn(inputs):
return {
'min':
tft.min(inputs['a'], reduce_instance_dims=reduce_instance_dims),
'max':
tft.max(inputs['a'], reduce_instance_dims=reduce_instance_dims),
'sum':
tft.sum(inputs['a'], reduce_instance_dims=reduce_instance_dims),
'size':
tft.size(inputs['a'], reduce_instance_dims=reduce_instance_dims),
'mean':
tft.mean(inputs['a'], reduce_instance_dims=reduce_instance_dims),
'var':
tft.var(inputs['a'], reduce_instance_dims=reduce_instance_dims),
}
output_dtype = _canonical_dtype(input_dtype).as_numpy_dtype
input_data = [
{'idx': [0, 1], 'val': [0., 1.]},
{'idx': [1, 3], 'val': [2., 3.]},
]
input_metadata = tft_unit.metadata_from_feature_spec({
'a': tf.io.SparseFeature('idx', 'val', _canonical_dtype(input_dtype), 4)
})
if reduce_instance_dims:
expected_outputs = {
'min': np.array(0., output_dtype),
'max': np.array(3., output_dtype),
'sum': np.array(6., output_dtype),
'size': np.array(4, np.int64),
'mean': np.array(1.5, np.float32),
'var': np.array(1.25, np.float32),
}
else:
if input_dtype.is_floating:
missing_value_max = float('nan')
missing_value_min = float('nan')
else:
missing_value_max = np.iinfo(output_dtype).min
missing_value_min = np.iinfo(output_dtype).max
expected_outputs = {
'min': np.array([0., 1., missing_value_min, 3.], output_dtype),
'max': np.array([0., 2., missing_value_max, 3.], output_dtype),
'sum': np.array([0., 3., 0., 3.], output_dtype),
'size': np.array([1, 2, 0, 1], np.int64),
'mean': np.array([0., 1.5, float('nan'), 3.], np.float32),
'var': np.array([0., 0.25, float('nan'), 0.], np.float32),
}
self.assertAnalyzerOutputs(input_data, input_metadata, analyzer_fn,
expected_outputs)
def testNumericAnalyzersWithInputsAndAxis(self):
def analyzer_fn(inputs):
return {
'min': tft.min(inputs['a'], reduce_instance_dims=False),
'max': tft.max(inputs['a'], reduce_instance_dims=False),
'sum': tft.sum(inputs['a'], reduce_instance_dims=False),
'size': tft.size(inputs['a'], reduce_instance_dims=False),
'mean': tft.mean(inputs['a'], reduce_instance_dims=False),
'var': tft.var(inputs['a'], reduce_instance_dims=False),
}
input_data = [
{'a': [8, 9, 3, 4]},
{'a': [1, 2, 10, 11]}
]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([4], tf.int64)})
expected_outputs = {
'min': np.array([1, 2, 3, 4], np.int64),
'max': np.array([8, 9, 10, 11], np.int64),
'sum': np.array([9, 11, 13, 15], np.int64),
'size': np.array([2, 2, 2, 2], np.int64),
'mean': np.array([4.5, 5.5, 6.5, 7.5], np.float32),
'var': np.array([12.25, 12.25, 12.25, 12.25], np.float32),
}
self.assertAnalyzerOutputs(
input_data, input_metadata, analyzer_fn, expected_outputs)
def testNumericAnalyzersWithNDInputsAndAxis(self):
def analyzer_fn(inputs):
return {
'min': tft.min(inputs['a'], reduce_instance_dims=False),
'max': tft.max(inputs['a'], reduce_instance_dims=False),
'sum': tft.sum(inputs['a'], reduce_instance_dims=False),
'size': tft.size(inputs['a'], reduce_instance_dims=False),
'mean': tft.mean(inputs['a'], reduce_instance_dims=False),
'var': tft.var(inputs['a'], reduce_instance_dims=False),
}
input_data = [
{'a': [[8, 9], [3, 4]]},
{'a': [[1, 2], [10, 11]]}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([2, 2], tf.int64)})
expected_outputs = {
'min': np.array([[1, 2], [3, 4]], np.int64),
'max': np.array([[8, 9], [10, 11]], np.int64),
'sum': np.array([[9, 11], [13, 15]], np.int64),
'size': np.array([[2, 2], [2, 2]], np.int64),
'mean': np.array([[4.5, 5.5], [6.5, 7.5]], np.float32),
'var': np.array([[12.25, 12.25], [12.25, 12.25]], np.float32),
}
self.assertAnalyzerOutputs(
input_data, input_metadata, analyzer_fn, expected_outputs)
def testNumericAnalyzersWithShape1NDInputsAndAxis(self):
def analyzer_fn(inputs):
return {
'min': tft.min(inputs['a'], reduce_instance_dims=False),
'max': tft.max(inputs['a'], reduce_instance_dims=False),
'sum': tft.sum(inputs['a'], reduce_instance_dims=False),
'size': tft.size(inputs['a'], reduce_instance_dims=False),
'mean': tft.mean(inputs['a'], reduce_instance_dims=False),
'var': tft.var(inputs['a'], reduce_instance_dims=False),
}
input_data = [{'a': [[8, 9]]}, {'a': [[1, 2]]}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([1, 2], tf.int64)})
expected_outputs = {
'min': np.array([[1, 2]], np.int64),
'max': np.array([[8, 9]], np.int64),
        'sum': np.array([[9, 11]], np.int64),
        'size': np.array([[2, 2]], np.int64),
        'mean': np.array([[4.5, 5.5]], np.float32),
        'var': np.array([[12.25, 12.25]], np.float32),
    }
    self.assertAnalyzerOutputs(
        input_data, input_metadata, analyzer_fn, expected_outputs)
"""
.. module: plotting
:platform: Unix
:synopsis: Functions to plot various outputs of the `Pyranha` methods.
.. moduleauthor: <NAME> <<EMAIL>>
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from pylab import cm
import numpy as np
def plot_fisher_corner(fisher_mats, labels, xcen=0, ycen=1, xmin=-2.1, xmax=2.1,
ymin=0.88, ymax=1.12, title="", opath=None):
"""Function to plot 2x2 Fisher matrices.
:param fisher_mats: List of fisher matrices to be plotted.
"""
# Set up the figure environment.
fig = plt.figure(figsize=(6, 6))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(223)
ax3 = fig.add_subplot(224)
plt.subplots_adjust(hspace=0, wspace=0)
# Set limits:
xarr = np.linspace(xmin, xmax, 1000)
yarr = np.linspace(ymin, ymax, 1000)
    # Loop over the matrices to add them to the plot.
for fisher_mat, label in zip(fisher_mats, labels):
# Rescale the elements of the matrix x -> 10^3 x
fisher_mat[0, 0] *= 10**-6
fisher_mat[0, 1] *= 10**-3
fisher_mat[1, 0] *= 10**-3
# Compute the inverse of the matrix.
covar = np.linalg.inv(fisher_mat)
# Get the marginalized 1-sigma values.
sigma_00 = np.sqrt(covar[0, 0])
sigma_11 = np.sqrt(covar[1, 1])
# Compute Gaussians over this range centered on the parameters xcen
# and ycen.
oned_gauss_x = np.exp(- (xarr - xcen) ** 2 / (2. * sigma_00 ** 2))
oned_gauss_y = np.exp(- (yarr - ycen) ** 2 / (2. * sigma_11 ** 2))
# Get eigenvalues and eigenvectors of covariance matrix
w, v = np.linalg.eigh(covar)
# Get the angle (in degrees) from the vector with largest eigenvalue
angle_deg = np.arctan2(v[1, 1], v[0, 1]) * 180. / np.pi
width1 = 2 * np.sqrt(2.3 * w[1])
height1 = 2 * np.sqrt(2.3 * w[0])
        width2 = 2 * np.sqrt(5.99 * w[1])
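
# --- Illustrative sketch, not part of the original module ---
# Shows how the eigen-decomposition above is typically turned into a drawable
# confidence contour: the 68% (delta chi^2 = 2.3) ellipse has semi-axes
# sqrt(2.3 * w_i) along the eigenvectors of the covariance. The toy covariance
# and the helper name are made up for illustration only.
def _example_fisher_ellipse(ax, xcen=0.0, ycen=1.0):
    covar = np.array([[0.04, 0.001],
                      [0.001, 0.0004]])
    w, v = np.linalg.eigh(covar)
    angle_deg = np.arctan2(v[1, 1], v[0, 1]) * 180. / np.pi
    ellipse = Ellipse(xy=(xcen, ycen), width=2 * np.sqrt(2.3 * w[1]),
                      height=2 * np.sqrt(2.3 * w[0]), angle=angle_deg,
                      fill=False)
    ax.add_patch(ellipse)
    return ellipse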
from functools import reduce
from copy import copy
from time import time
import numpy as np
import numpy.random as npr
import numpy.linalg as la
import scipy.linalg as sla
from scipy.linalg import solve_discrete_lyapunov, solve_discrete_are
from utility.matrixmath import vec, mat, mdot, matmul_lr, specrad, dlyap, dare, dare_gain
from quadtools import quadblock, quadstack, unquadblock, unquadstack
class LinearSystem:
def __init__(self, A, B, C, a, Aa, b, Bb, c, Cc, Q, W):
self.A = A
self.B = B
self.C = C
self.a = a
self.b = b
self.c = c
self.Aa = Aa
self.Bb = Bb
self.Cc = Cc
self.Q = Q
self.W = W
self.n = A.shape[0]
self.m = B.shape[1]
self.p = C.shape[0]
@property
def data(self):
return self.A, self.B, self.C, self.a, self.Aa, self.b, self.Bb, self.c, self.Cc, self.Q, self.W
@property
def dims(self):
return self.n, self.m, self.p
@property
def AB(self):
return np.block([self.A, self.B])
@property
def AC(self):
return np.block([[self.A], [self.C]])
class LinearSystemControlled(LinearSystem):
def __init__(self, system, K, L):
super().__init__(*system.data)
self.K = K
self.L = L
# Zeros matrices
self.Zn = np.zeros([self.n, self.n])
@property
def BK(self):
return self.B @ self.K
@property
def LC(self):
return self.L @ self.C
@property
def F(self):
return self.A + self.BK - self.LC
@property
def Phi_aug(self):
return np.block([[self.A, self.BK],
[self.LC, self.F]])
@property
def AK(self):
return self.A + self.BK
@property
def AL(self):
return self.A - self.LC
@property
def IK(self):
return np.block([[np.eye(self.n)], [self.K]])
@property
def IL(self):
return np.block([np.eye(self.n), self.L])
@property
def QK(self):
return matmul_lr(self.IK.T, self.Q)
@property
def WL(self):
return matmul_lr(self.IL, self.W)
@property
def IK_aug(self):
return sla.block_diag(np.eye(self.n), self.K)
@property
def IL_aug(self):
return sla.block_diag(np.eye(self.n), self.L)
@property
def QK_aug(self):
return matmul_lr(self.IK_aug.T, self.Q)
@property
def WL_aug(self):
return matmul_lr(self.IL_aug, self.W)
@property
def linop1(self):
# Closed-loop quadratic cost transition operator
linop = np.kron(self.Phi_aug.T, self.Phi_aug.T)
for i in range(self.a.size):
PhiAa = np.block([[self.Aa[i], self.Zn],
[self.Zn, self.Zn]])
linop += self.a[i]*np.kron(PhiAa.T, PhiAa.T)
for i in range(self.b.size):
PhiBb = np.block([[self.Zn, np.dot(self.Bb[i], self.K)],
[self.Zn, self.Zn]])
linop += self.b[i]*np.kron(PhiBb.T, PhiBb.T)
for i in range(self.c.size):
PhiCc = np.block([[self.Zn, self.Zn],
[np.dot(self.L, self.Cc[i]), self.Zn]])
linop += self.c[i]*np.kron(PhiCc.T, PhiCc.T)
return linop
@property
def linop2(self):
# Closed-loop second moment transition operator
linop = np.kron(self.Phi_aug, self.Phi_aug)
for i in range(self.a.size):
PhiAa = np.block([[self.Aa[i], self.Zn],
[self.Zn, self.Zn]])
linop += self.a[i]*np.kron(PhiAa, PhiAa)
for i in range(self.b.size):
PhiBb = np.block([[self.Zn, np.dot(self.Bb[i], self.K)],
[self.Zn, self.Zn]])
linop += self.b[i]*np.kron(PhiBb, PhiBb)
for i in range(self.c.size):
PhiCc = np.block([[self.Zn, self.Zn],
                              [np.dot(self.L, self.Cc[i]), self.Zn]])
            linop += self.c[i]*np.kron(PhiCc, PhiCc)
        return linop
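
# --- Illustrative sketch, not part of the original class ---
# linop2 above is built from Kronecker products because, with a column-major
# vec, the second moment X of the augmented state propagates as
# vec(Phi X Phi^T) = (Phi kron Phi) vec(X). The matrices below are random
# placeholders used only to verify that identity numerically.
def _check_kron_vec_identity(n=3):
    Phi = npr.randn(n, n)
    X0 = npr.randn(n, n)
    X = X0 @ X0.T  # a valid (symmetric PSD) second moment
    lhs = (Phi @ X @ Phi.T).flatten(order='F')
    rhs = np.kron(Phi, Phi) @ X.flatten(order='F')
    assert np.allclose(lhs, rhs)
    return lhs, rhs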
import os
import argparse
import sys
import numpy as np
import math
import scipy.sparse as sp
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class GCN(nn.Module):
def __init__(self, features, hidden, classes, dropout, layers=2):
super(GCN, self).__init__()
self.gc1 = GCLayer(features, hidden)
self.gc2 = GCLayer(hidden, classes)
self.gc3 = None
self.dropout = dropout
if layers == 3:
self.gc2 = GCLayer(hidden, hidden)
self.gc3 = GCLayer(hidden, classes)
def forward(self, x, adj):
x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
x = self.gc2(x, adj)
        if self.gc3 is not None:
x = self.gc3(x, adj)
return F.log_softmax(x, dim=1)
class GCLayer(Module):
def __init__(self, dim_in, dim_out):
super(GCLayer, self).__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.weight = Parameter(torch.FloatTensor(self.dim_in, self.dim_out))
self.bias = Parameter(torch.FloatTensor(self.dim_out))
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
return output + self.bias
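
# --- Illustrative sketch, not part of the original training script ---
# Minimal forward pass through the two-layer GCN defined above on a made-up
# 4-node graph. The identity adjacency (each node only sees itself) and all
# sizes here are arbitrary placeholders chosen to keep the example tiny.
def _example_gcn_forward():
    num_nodes, num_features, num_classes = 4, 8, 3
    x = torch.randn(num_nodes, num_features)
    adj = torch.eye(num_nodes).to_sparse()  # sparse, as expected by torch.spmm
    model = GCN(features=num_features, hidden=16, classes=num_classes, dropout=0.5)
    log_probs = model(x, adj)
    return log_probs.shape  # torch.Size([4, 3])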
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}
    labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)
    return labels_onehot
def normalize(mx):
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
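
# --- Illustrative sketch, not part of the original script ---
# normalize() above performs row normalization, i.e. D^-1 * M where D holds
# the row sums. Small made-up example: [[1, 1], [2, 0]] -> [[0.5, 0.5], [1, 0]].
def _example_row_normalize():
    mx = sp.csr_matrix(np.array([[1.0, 1.0], [2.0, 0.0]]))
    return normalize(mx).toarray()  # [[0.5, 0.5], [1.0, 0.0]]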
def load_data(path):
idx_features_labels = np.genfromtxt("{}/cora.content".format(path),dtype=np.dtype(str))
features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
labels = encode_onehot(idx_features_labels[:, -1])
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt("{}/cora.cites".format(path),dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),shape=(labels.shape[0], labels.shape[0]),dtype=np.float32)
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
features = normalize(features)
adj = normalize(adj + sp.eye(adj.shape[0]))
features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(np.where(labels)[1])
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import pickle
# Globals
mtx = None
dist = None
# Calibrate camera API
def calibrate_camera(display_output = False):
# Chess board size
chessboard_size = (9, 6)
# Since we know object points, we can prepare them as (0, 0, 0), (1, 0, 0) ...
objp = np.zeros((chessboard_size[1] * chessboard_size[0], 3), np.float32)
objp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1,2)
# Prepare input arrays for cv2.calibrateCamera()
object_points = []
image_points = []
# Load all images from camera_cal folder
images = glob.glob('camera_cal/calibration*.jpg')
image_shape = None
# Iterate through images and append image points for coresponding
for image in images:
# Read image
img = cv2.imread(image)
image_shape = img.shape
# Convert image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Find chessboard corners
ret, corners = cv2.findChessboardCorners(gray, chessboard_size, None)
# Check if found corners successfuly
if ret is True:
# Append detected corners alongisde coresponding objp
object_points.append(objp)
image_points.append(corners)
# Display found corners as sanity check
if display_output is True:
cv2.drawChessboardCorners(img, chessboard_size, corners, ret)
cv2.imshow('Corners', img)
cv2.waitKey(200)
cv2.destroyAllWindows()
cv2.imwrite('output_images/calibration_output.jpg', img)
else:
            # Opencv findChessboardCorners fails for calibration images 1, 4, 5
# I guess the reason is missing whitespace around chessboard in those images
# Note from opencv site:
'''
The function requires white space (like a square-thick border, the wider the better) around the board to make the detection more robust in various
environments. Otherwise, if there is no border and the background is dark, the outer black squares cannot be segmented properly and so the square
grouping and ordering algorithm fails.
'''
print("Failed to find chessbpard corners for", image)
# Acquire camera matrix and distortion coeffs
ret, mtx, dist_coef, rvecs, tvecs = cv2.calibrateCamera(object_points, image_points, (image_shape[1], image_shape[0]), None, None)
# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist_coef
pickle.dump(dist_pickle, open("calibration_output/wide_dist_pickle.p", "wb"))
# Image processing pipeline API
def undistort_image(image, camera_matrix, distortion_coefficients):
# Just apply opencv distortion
return cv2.undistort(image, camera_matrix, distortion_coefficients, None, camera_matrix)
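
# --- Illustrative sketch, not part of the original pipeline ---
# Typical use of the two functions above: run calibrate_camera() once, then
# reload the pickled camera matrix and distortion coefficients to undistort
# frames. The image path below is a placeholder, not a file guaranteed to
# exist in this repository.
def _example_undistort(image_path='test_images/test1.jpg'):
    with open("calibration_output/wide_dist_pickle.p", "rb") as f:
        calibration = pickle.load(f)
    img = cv2.imread(image_path)
    return undistort_image(img, calibration["mtx"], calibration["dist"])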
def absolute_sobel_thresholding(img, thresh_low = 20, thresh_high = 100, kernel_size = 5):
# Convert image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Apply sobel in x direction
sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize = kernel_size)
# Absolute value
absolute_sobel = np.abs(sobel)
# Scale absolute to 0 - 255 range
scaled_absolute = np.uint8(255 * absolute_sobel / np.max(absolute_sobel))
# Prepare output
binary_output = np.zeros_like(scaled_absolute)
# Calculate output
binary_output[(scaled_absolute > thresh_low) & (scaled_absolute < thresh_high)] = 1
return binary_output
def color_thresholding(img, thresh_low = 170, thresh_high = 255):
# Convert image to HLS color space
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
# Isolate S channel
s_channel = hls[:, :, 2]
# Calculate binary output
binary_output = np.zeros_like(s_channel)
binary_output[(s_channel > thresh_low) & (s_channel <= thresh_high)] = 1
return binary_output
def gradient_direction_thresholding(img, sobel_kernel=5, thresh=(0, np.pi/2)):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Get gradient in x direction
sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize = sobel_kernel)
# Get gradient in y direction
sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize = sobel_kernel)
# Get absolute values for each image
abs_sobel_x = np.abs(sobel_x)
abs_sobel_y = np.abs(sobel_y)
# Calculate gradient direction
gradient_direction = np.arctan2(abs_sobel_y, abs_sobel_x)
# Prepare output
binary_output = np.zeros_like(gradient_direction)
# Do the thresholding
l_thresh, h_thresh = thresh
binary_output[(gradient_direction >= l_thresh) & (gradient_direction <= h_thresh)] = 1
return binary_output
# Will not be used since it introduces additional y direction edges
def gradient_magnitude_thresholding(img, sobel_kernel=5, mag_thresh=(0, 255)):
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Get gradient in x direction
sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize = sobel_kernel)
# Get gradient in y direction
sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize = sobel_kernel)
# Calculate magnitude as mag = sqrt(sobel_x^2 + sobel_y^2)
magnitude = np.sqrt(np.square(sobel_x) + np.square(sobel_y))
# Scale the magnitude to 0 - 255 range
    scaled_magnitude = np.uint8(255 * magnitude / np.max(magnitude))
    # Create binary mask
    binary_output = np.zeros_like(scaled_magnitude)
    low_thresh, high_thresh = mag_thresh
    # Do the thresholding
    binary_output[(low_thresh <= scaled_magnitude) & (scaled_magnitude <= high_thresh)] = 1
return binary_output
def apply_thresholds(img,
blur_kernel_size = 5,
abs_sobel_kernel = 5,
abs_soble_threshold = (20, 100),
color_thresholds = (170, 255),
gradient_magnitude_kernel_size = 5,
gradient_magnitude_thresholds = (80, 100),
gradient_kernel_size = 15,
gradient_direction_thresholds = (0.7, 1.4)):
# Apply bluring to the image
img = cv2.GaussianBlur(img, (blur_kernel_size, blur_kernel_size), 0)
# Sobel gradient thresholding
gradient_binary = absolute_sobel_thresholding(img, thresh_low = abs_soble_threshold[0], thresh_high = abs_soble_threshold[1], kernel_size = abs_sobel_kernel)
# S channel thresholding
color_binary = color_thresholding(img, thresh_low = color_thresholds[0], thresh_high = color_thresholds[1])
# Apply gradient magnitude thresholding
mag_binary = gradient_magnitude_thresholding(img, sobel_kernel = gradient_magnitude_kernel_size, mag_thresh = gradient_magnitude_thresholds)
# Apply gradient direction thresholding
direction_binary = gradient_direction_thresholding(img, sobel_kernel = gradient_kernel_size, thresh = gradient_direction_thresholds)
# Prepare binary output
combined_binary = np.zeros_like(gradient_binary)
# Combine thresholds
combined_binary[((mag_binary == 1) & (direction_binary == 1)) | (color_binary == 1) | (gradient_binary == 1)] = 1
return combined_binary
def go_to_birdview_perspective(img):
# Start by defining source points
# Magic numbers acquired from gimp
point1 = [280, 700]
point2 = [595, 460]
point3 = [725, 460]
point4 = [1125, 700]
source = np.float32([point1, point2, point3, point4])
# Define destination vertices
# Magic numbers acquired from gimp
dest_point1 = [250, 720]
dest_point2 = [250, 0]
dest_point3 = [1065, 0]
dest_point4 = [1065, 720]
destination = np.float32([dest_point1, dest_point2, dest_point3, dest_point4])
transformation_matrix = cv2.getPerspectiveTransform(source, destination)
inverse_transform_matrix = cv2.getPerspectiveTransform(destination, source)
output = cv2.warpPerspective(img, transformation_matrix, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)
return output, inverse_transform_matrix
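
# --- Illustrative sketch, not part of the original pipeline ---
# Sanity check for the hard-coded warp above: the four source corners should
# map exactly onto the four destination corners. cv2.perspectiveTransform
# expects points shaped (N, 1, 2).
def _check_birdview_mapping():
    source = np.float32([[[280, 700]], [[595, 460]], [[725, 460]], [[1125, 700]]])
    destination = np.float32([[[250, 720]], [[250, 0]], [[1065, 0]], [[1065, 720]]])
    matrix = cv2.getPerspectiveTransform(source.reshape(4, 2),
                                         destination.reshape(4, 2))
    mapped = cv2.perspectiveTransform(source, matrix)
    assert np.allclose(mapped, destination, atol=1e-3)
    return mapped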
def find_lane_start(binary_warped):
# Sum all the ones in binary image per column
histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis = 0)
# Find the mid point in histogram
midpoint = np.int(histogram.shape[0] // 2)
# Find left peak
left_base = np.argmax(histogram[:midpoint])
# Find right peak
right_base = np.argmax(histogram[midpoint:]) + midpoint
return left_base, right_base
def find_lane_pixels(binary_warped, number_of_windows = 9, margin = 100, min_pixel_detection = 50, draw_windows = False):
# Calculate height of single window
window_height = np.int(binary_warped.shape[0] // number_of_windows)
# Find indices of all non zero pixels in the image
non_zero = binary_warped.nonzero()
non_zero_x = np.array(non_zero[1])
non_zero_y = np.array(non_zero[0])
# Set position of current window
left_current_x, right_current_x = find_lane_start(binary_warped)
# Prepare output lists in which we put indices of pixels that belong to the lane line
left_lane_indices = []
right_lane_indices = []
# Optional draw output
if draw_windows is True:
out_image = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
for window in range(number_of_windows):
# Identify window vertical boundaries
window_y_low = binary_warped.shape[0] - (window + 1) * window_height
window_y_high = binary_warped.shape[0] - window * window_height
# Identify window horizontal boundaries
left_window_x_low = left_current_x - margin
left_window_x_high = left_current_x + margin
right_window_x_low = right_current_x - margin
right_window_x_high = right_current_x + margin
# Optional draw
if draw_windows is True:
cv2.rectangle(out_image,(left_window_x_low, window_y_low),
(left_window_x_high, window_y_high), (0,255,0), 4)
cv2.rectangle(out_image,(right_window_x_low, window_y_low),
(right_window_x_high, window_y_high),(0,255,0), 4)
# Identify all pixels that belong to left window
left_belonging_pixels_indices = ((non_zero_y >= window_y_low) &
(non_zero_y < window_y_high) &
(non_zero_x >= left_window_x_low) &
(non_zero_x < left_window_x_high)).nonzero()[0]
# Identify all pixels that belong to right window
right_belonging_pixels_indices = ((non_zero_y >= window_y_low) &
(non_zero_y < window_y_high) &
(non_zero_x >= right_window_x_low) &
(non_zero_x < right_window_x_high)).nonzero()[0]
# Record belonging left line indices
left_lane_indices.append(left_belonging_pixels_indices)
# Record belonging right line indices
right_lane_indices.append(right_belonging_pixels_indices)
# Recalculate center of next iteration window
if len(left_belonging_pixels_indices) > min_pixel_detection:
left_current_x = int(np.mean(non_zero_x[left_belonging_pixels_indices]))
if len(right_belonging_pixels_indices) > min_pixel_detection:
right_current_x = int(np.mean(non_zero_x[right_belonging_pixels_indices]))
# Concatenate all the recorded pixel coordinates
left_lane_indices = np.concatenate(left_lane_indices)
right_lane_indices = np.concatenate(right_lane_indices)
# Extract left and right lane line pixel positions
left_x = non_zero_x[left_lane_indices]
left_y = non_zero_y[left_lane_indices]
right_x = non_zero_x[right_lane_indices]
right_y = non_zero_y[right_lane_indices]
if draw_windows is True:
return left_x, left_y, right_x, right_y, out_image
else:
return left_x, left_y, right_x, right_y
def fit_poly(line_x, line_y):
poly = np.polyfit(line_y, line_x, 2)
return poly
# Debug fuction with drawing
def fit_polynomial(left_line_x, left_line_y, right_line_x, right_line_y, img_shape, out_img, draw_lines = False):
# Fit poly
    # Reverse x and y because, for a single x value, we can have multiple points
left_line = np.polyfit(left_line_y, left_line_x, 2)
right_line = np.polyfit(right_line_y, right_line_x, 2)
if draw_lines is not True:
return left_line, right_line
else:
# Calculate concrete values so we can plot line
plot_y = np.linspace(0, img_shape[0] - 1, img_shape[0])
left_line_ploted = left_line[0] * plot_y ** 2 + left_line[1] * plot_y + left_line[2]
right_line_ploted = right_line[0] * plot_y ** 2 + right_line[1] * plot_y + right_line[2]
# Prepare output image
out_img[left_line_y, left_line_x] = [255, 0, 0]
out_img[right_line_y, right_line_x] = [0, 0, 255]
f = plt.figure()
plt.imshow(out_img)
# Plots the left and right polynomials on the lane lines
plt.plot(left_line_ploted, plot_y, color='yellow', linewidth=5.0)
plt.plot(right_line_ploted, plot_y, color='yellow', linewidth=5.0)
f.tight_layout()
f.savefig('output_images/fitted_lines_window_with_line.jpg')
return left_line, right_line, out_img
def targeted_search(warped_binary, prev_left_line, prev_right_line, margin = 100, draw_lines = False):
# Find non zero pixels
non_zero = warped_binary.nonzero()
non_zero_x = non_zero[1]
non_zero_y = non_zero[0]
# Search for points around previous lane detection, similar to what we did with windows
left_belonging_pixel_indices = ((non_zero_x > (prev_left_line[0] * (non_zero_y ** 2) + prev_left_line[1] * non_zero_y + prev_left_line[2] - margin)) &
(non_zero_x < (prev_left_line[0] * (non_zero_y ** 2) + prev_left_line[1] * non_zero_y + prev_left_line[2] + margin)))
right_belonging_pixel_indices = ((non_zero_x > (prev_right_line[0] * (non_zero_y ** 2) + prev_right_line[1] * non_zero_y + prev_right_line[2] - margin)) &
(non_zero_x < (prev_right_line[0] * (non_zero_y ** 2) + prev_right_line[1] * non_zero_y + prev_right_line[2] + margin)))
# Extract left and right lane line pixel positions
left_line_x = non_zero_x[left_belonging_pixel_indices]
left_line_y = non_zero_y[left_belonging_pixel_indices]
right_line_x = non_zero_x[right_belonging_pixel_indices]
right_line_y = non_zero_y[right_belonging_pixel_indices]
if draw_lines is False:
return left_line_x, left_line_y, right_line_x, right_line_y
else:
left_line = fit_poly(left_line_x, left_line_y)
right_line = fit_poly(right_line_x, right_line_y)
# Calculate concrete values so we can plot line
plot_y = np.linspace(0, warped_binary.shape[0] - 1, warped_binary.shape[0])
left_line_ploted = left_line[0] * plot_y ** 2 + left_line[1] * plot_y + left_line[2]
right_line_ploted = right_line[0] * plot_y ** 2 + right_line[1] * plot_y + right_line[2]
# Prepare output image
out_img = np.dstack((warped_binary, warped_binary, warped_binary)) * 255
out_img[left_line_y, left_line_x] = [255, 0, 0]
out_img[right_line_y, right_line_x] = [0, 0, 255]
f = plt.figure()
plt.imshow(out_img)
plt.plot(left_line_ploted, plot_y, color='white')
plt.plot(right_line_ploted, plot_y, color='white')
f.tight_layout()
f.savefig('output_images/targeted_search_lines_window_with_line.jpg')
return left_line_x, left_line_y, right_line_x, right_line_y, out_img
def measure_curvature(img_shape, line, ym_per_pix = 30 / 720, xm_per_pix = 3.7 / 700):
# Generate y values for plotting
plot_y = np.linspace(0, img_shape[0] - 1, img_shape[0])
# Calculate x values using polynomial coeffs
line_x = line[0] * plot_y ** 2 + line[1] * plot_y + line[2]
# Evaluate at bottom of image
y_eval = np.max(plot_y)
# Fit curves with corrected axis
curve_fit = np.polyfit(plot_y * ym_per_pix, line_x * xm_per_pix, 2)
# Calculate curvature for line
curvature = ((1 + (2 * curve_fit[0] * y_eval * ym_per_pix + curve_fit[1]) ** 2) ** (3 / 2)) / np.absolute(
2 * curve_fit[0])
return curvature
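
# --- Illustrative sketch, not part of the original pipeline ---
# Worked check of the curvature formula used above,
# R = (1 + (2*A*y + B)^2)^(3/2) / |2*A|: for a pure parabola x = A*y^2
# evaluated at y = 0 it reduces to R = 1 / (2*A). The coefficient is made up.
def _example_curvature_check():
    A = 1e-4
    radius = (1 + (2 * A * 0.0) ** 2) ** 1.5 / np.absolute(2 * A)
    assert np.isclose(radius, 1.0 / (2 * A))  # 5000.0
    return radius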
def calculate_vehicle_offset(image_shape, left_line, right_line, meter_per_pixel_x = 3.7 / 700):
# We will calculate offset at same point as we did for calculating curvature
y_eval = image_shape[0]
# Find values for both lines in those at y pos
left_x = left_line[0] * (y_eval ** 2) + left_line[1] * y_eval + left_line[2]
right_x = right_line[0] * (y_eval ** 2) + right_line[1] * y_eval + right_line[2]
# Middle of the lane should be in middle of the image
mid_image = image_shape[1] // 2
# Car position - middle between lane lines
car_position = (left_x + right_x) / 2
# Calculate offset
offset = (mid_image - car_position) * meter_per_pixel_x
return offset
def unwarp_detection(left_line, right_line, inverse_transformation_matrix, undistorted_image):
# Calculate x points for each y point
y = np.linspace(0, 720, 719)
left_line_x_points = left_line[0] * (y ** 2) + left_line[1] * y + left_line[2]
right_line_x_points = right_line[0] * (y ** 2) + right_line[1] * y + right_line[2]
# Create an image to draw the lines on
warp_zero = np.zeros_like(undistorted_image[:,:,0]).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
import pandas
import scipy
import numpy
def identity(x):
return x
def rolling_apply(df, window_size, f, trim=True, **kargs):
''' A function that will apply a window wise operation to the rows of a
dataframe, df.
input:
df - a pandas dataframe
window_size - The size of windows to extract from each row series
f - A function mapping f(numpy.ndarray) -> scalar
trim - a boolean, if true, columns with null values will be trimmed
kargs - Other parameters to pass to the Series.rolling constructor
output:
a dataframe
'''
data = df.apply(lambda x: x.rolling(window_size, **kargs).apply(f), axis=1)
if(trim):
data = data.dropna(axis=1, how='any')
return data
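
# --- Illustrative sketch, not part of the original module ---
# Example use of rolling_apply on a made-up 2x5 frame: a 3-wide rolling mean
# leaves NaNs in the first two columns, which trim=True then drops.
def _example_rolling_apply():
    df = pandas.DataFrame([[1.0, 2.0, 3.0, 4.0, 5.0],
                           [2.0, 4.0, 6.0, 8.0, 10.0]])
    return rolling_apply(df, 3, numpy.mean)  # values [[2, 3, 4], [4, 6, 8]]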
def transform_standardized(cell_trap_frame, axis=1):
""" Transforms a dataframe into a feature representation of the data
standardizing each element by row. Use axis=0 to perform the
transformation by column as a form of time normalization.
ret = ( t(raw) - min(t_axis) ) / max(t_axis)
"""
mins = cell_trap_frame.min(axis=axis)
maxs = cell_trap_frame.max(axis=axis)
temp = cell_trap_frame.sub(mins, axis=abs(axis - 1))
return temp.div(maxs, axis=abs(axis - 1))
def resample(cell_trap_frame, resample_freq=60):
cell_trap = cell_trap_frame.transpose()
cell_trap.index = pandas.to_datetime(cell_trap.index, unit="s")
cell_trap_resamp = cell_trap.resample("{0}T".format(resample_freq)).mean()
data_plot = cell_trap_resamp.transpose()
data_plot.columns = data_plot.columns.values.astype(numpy.int64) // 10**9
return data_plot
def holt_winters_second_order_ewma(x, alpha, beta):
"""
A smoothing function that takes a weighted mean of a point in a time
series with respect to its history. alpha is the weight (ie, the relative
importance) with which the time series most recent behavior is valued.
Similarly, beta is the weight given to the most recent 1st derivative, w,
when averaging the trend.
input
:param x: array or array-like
Time series to be smoothed.
:param alpha: float, (0,1)
Weight given to most recent time point.
:param beta: float, (0, 1)
Weight given to most recent linear slope/trend.
:return: s: numpy.array
The smoothed time series.
"""
N = x.size
s = numpy.zeros((N, ))
b = numpy.zeros((N, ))
s[0] = x[0]
for i in range(1, N):
        if numpy.isnan(s[i - 1]):
            # Reset the filter when the history is undefined.
            s[i] = x[i]
            continue
        s[i] = alpha * x[i] + (1 - alpha) * (s[i - 1] + b[i - 1])
b[i] = beta * (s[i] - s[i - 1]) + (1 - beta) * b[i - 1]
return s
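
# --- Illustrative sketch, not part of the original module ---
# Smoothing a made-up noisy ramp with the double-exponential filter above.
# Larger alpha tracks the raw signal more closely; larger beta lets the
# trend estimate react faster. The parameter values are arbitrary.
def _example_holt_winters():
    t = numpy.arange(200)
    noisy = 0.05 * t + numpy.random.normal(scale=0.5, size=t.size)
    return holt_winters_second_order_ewma(noisy, alpha=0.1, beta=0.001)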
def smooth(cell_trap_frame, alpha=0.1, beta=0.001):
return cell_trap_frame.apply(
lambda x: holt_winters_second_order_ewma(x, alpha, beta),
axis=1,
raw=True)
def rolling_standardized_center(cell_trap_frame, window_size):
''' ret[a, b] = ( ctf[a,b] - min(ctf[a, b-ws/2:b+ws/2]) / max(ctf[a, b-ws/2:b+ws/2])
'''
return rolling_apply(
cell_trap_frame,
window_size,
        lambda x: (x[window_size // 2] - x.min()) / x.max(),
center=True)
def rolling_standardized_right(cell_trap_frame, window_size):
''' let ctf = cell_trap_frame - min(cell_trap_frame) + 1
ret[a,b] = ( ctf[a,b] - min(ctf[a, b-ws:b]) ) / max(ctf[a,b-ws:b])
'''
cell_trap_frame = cell_trap_frame - cell_trap_frame.min().min() + 1
return rolling_apply(
cell_trap_frame,
window_size,
lambda x: (x[-1] - x.min()) / x.max() )
def transform_z_score(cell_trap_frame, axis=0):
''' z_score the columns (axis=0) or rows(axis=1) of a dataframe.
'''
return cell_trap_frame.apply(
scipy.stats.mstats.zscore,
raw=True,
axis=axis)
def rolling_z_score_center(cell_trap_frame, window_size):
''' ret[a,b] = zscore(ctf[a, b-ws/2:b+ws/2])[b]
'''
return rolling_apply(
cell_trap_frame,
window_size,
        lambda x: scipy.stats.mstats.zscore(x)[window_size // 2],
center=True)
def rolling_z_score_right(cell_trap_frame, window_size):
    ''' ret[a,b] = zscore(ctf[a,b-ws:b])[b]
'''
return rolling_apply(
cell_trap_frame,
window_size,
lambda x: (x[-1] - x.mean()) / x.std())
def normalize_time(cell_trap_frame):
""" Transforms a dataframe by dividing each element by its column average.
Applying this transformation effectively means that each element is
now scaled in comparison to other elements observed at same time
ret = t(raw) / mean(t_column)
"""
means = cell_trap_frame.mean(axis=0)
return cell_trap_frame.div(means, axis=1)
def transform_delta(cell_trap_frame, shift=False):
''' Computes the delta across rows, delta(T, T-1). By default, the values
will associate with the t-1 label. Use shift=True to associate deltas
with the T label
ret = t(raw) - t-1(raw)
'''
deltas = cell_trap_frame.diff(axis=1)
if(shift):
deltas = deltas.shift(-1, axis=1)
return deltas.iloc[:, :-1]
else:
return deltas.iloc[:, 1:]
def transform_derivative(cell_trap_frame):
''' computes first derivative of the cell trap data
delta(signal)/delta(time)
'''
deltas = cell_trap_frame.diff(axis=1)
times = pandas.Series(cell_trap_frame.keys())
label_map = {k1: k2 for k1, k2 in zip(times.keys(), deltas.keys())}
times = times.rename(label_map)
delta_times = times.diff()
ret = deltas.apply(lambda c: c / delta_times, axis=1)
remap = {v: (i + v) / 2 for i,v in zip(ret.keys(), ret.keys()[1:])}
ret = ret.rename(columns=remap)
return ret
def transform_rof(cell_trap_frame):
middle_values = rolling_apply(
cell_trap_frame,
2,
lambda x: float(x[0] + x[1]) / 2)
deltas = transform_delta(cell_trap_frame)
return middle_values + deltas
#TODO: NH: make the interior print statement a warnings.warn() instead of a print().
def drop_duplicates(df, subset=None, keep='first'):
""" Drops duplicates from a DataFrame df.
Params :
df : DataFrame
            A DataFrame that duplicates should be removed from
subset : [obj]
A list of column keys in df to care about when establishing
if two entries are duplicates. Defaults to all column keys
keep : 'first' or 'last' or False
A rule to determine which duplicate entries should be kept.
'first' means that the first entry of a duplicate should be
kept while 'last' means that the last entry of a duplicate
            should be kept. If keep=False, then all duplicated entries
will be dropped.
Returns: DataFrame
"""
data = df.loc[numpy.invert(df.duplicated(subset=subset, keep=keep))]
if data.shape != df.shape:
print('Dropped duplicates : Original - {0!s} : New - {1!s}'.format(
df.shape, data.shape))
return data
def add_mean_and_std(df):
""" Return a copy of df with rows representing mean and standard
        deviation added. Mean corresponds to index 0 and stddev to index -1.
"""
mean_series = df.mean(axis=0)
std_series = df.std(axis=0)
ret = df.copy()
ret.loc[0] = mean_series
ret.loc[-1] = std_series
return ret.sort_index()
feature_types = {
'raw': lambda x: x,
'normalized': normalize_time,
'z_scored': lambda x: transform_z_score(x, axis=0),
'raw`': transform_derivative,
'raw`^2': lambda x: transform_derivative(x) ** 2,
'raw``': lambda x: transform_derivative(transform_derivative(x)),
'raw``^2': lambda x: transform_derivative(transform_derivative(x)) ** 2,
'formation_rate': transform_rof,
'standardized_10': lambda x: rolling_standardized_right(x, 10),
'standardized_20': lambda x: rolling_standardized_right(x, 20),
'standardized_30': lambda x: rolling_standardized_right(x, 30),
'rolling_z-score_10': lambda x: rolling_z_score_right(x, 10),
'rolling_z-score_20': lambda x: rolling_z_score_right(x, 20),
'rolling_z-score_30': lambda x: rolling_z_score_right(x, 30),
'smooth_rolling_z_score_30': lambda x: rolling_z_score_right(smooth(x),30),
'smooth': lambda x: smooth(x),
'smooth`': lambda x: transform_derivative(smooth(x)),
'smooth`^2': lambda x: transform_derivative(smooth(x)) ** 2,
'smooth``':
lambda x: transform_derivative(transform_derivative(smooth(x))),
'smooth``^2':
lambda x: transform_derivative(transform_derivative(smooth(x))) ** 2,
'smooth_resampled': lambda x: smooth(resample(x)),
'smooth_resampled``':
lambda x: smooth(resample(x)).diff(axis=1).diff(axis=1).dropna(axis=1, how='any'),
'smooth_resampled``^2':
lambda x: smooth(resample(x)).diff(axis=1).diff(axis=1).dropna(axis=1, how='any') ** 2,
'transform_derivative': lambda x: transform_derivative(x),
    'sqrt': lambda x: numpy.sqrt(x),
}
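# Example usage (illustrative, not part of the original module): build a chosen
# set of feature representations for a cell-trap DataFrame with one row per trap
# and one column per time point, as the transforms above assume.
def build_features(cell_trap_frame, names=None):
    names = list(feature_types.keys()) if names is None else names
    return {name: feature_types[name](cell_trap_frame) for name in names}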
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 2 20:14:10 2021
@author: Oliver
"""
"""Univariate Optimisation"""
# Golden Search Technique for finding the Maximum
import numpy as np
def gsection(ftn, xl, xm, xr, tol = 1e-9):
# applies the golden-section algorithm to maximise ftn
# we assume that ftn is a function of a single variable
# and that x.l < x.m < x.r and ftn(x.l), ftn(x.r) <= ftn(x.m)
#
# the algorithm iteratively refines x.l, x.r, and x.m and
# terminates when x.r - x.l <= tol, then returns x.m
# golden ratio plus one
gr1 = 1 + (1 + np.sqrt(5))/2
#
# successively refine x.l, x.r, and x.m
fl = ftn(xl)
fr = ftn(xr)
fm = ftn(xm)
while ((xr - xl) > tol):
if ((xr - xm) > (xm - xl)):
y = xm + (xr - xm)/gr1
fy = ftn(y)
if (fy >= fm):
xl = xm
fl = fm
xm = y
fm = fy
else:
xr = y
fr = fy
else:
y = xm - (xm - xl)/gr1
fy = ftn(y)
if (fy >= fm):
xr = xm
fr = fm
xm = y
fm = fy
else:
xl = y
fl = fy
return(xm)
xl=0
xm=5
xr=10
def ftn(x):
return x**2+5*x+10
print(gsection(ftn, xl, xm, xr, tol = 1e-9))
print(ftn(xm))
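# gsection assumes the maximum lies inside the bracket, i.e. ftn(xm) >= ftn(xl)
# and ftn(xm) >= ftn(xr). An illustrative check (not part of the original) with
# a function that satisfies this assumption, peaked at x = 4:
def ftn_unimodal(x):
    return -(x - 4)**2 + 3
print(gsection(ftn_unimodal, xl, xm, xr, tol = 1e-9)) # converges to ~4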
# Newton Maximizing Algorithm
import math
import matplotlib.pyplot as plt
def f(x):
return x**3-6*x**2+4*x+2
x = np.linspace(-1, 1)
fig, ax = plt.subplots()
ax.plot(x, f(x), label='target')
ax.grid()
def fprime(x):
return 3*x**2-12*x+4
def fsecond(x):
return 6*x - 12
def quadratic_approx(x, x0, f, fprime, fsecond):
return f(x0)+fprime(x0)*(x-x0)+0.5*fsecond(x0)*(x-x0)**2
x = np.linspace(-1, 1)
fig, ax = plt.subplots()
ax.plot(x, f(x), label='target')
ax.grid()
ax.plot(x, quadratic_approx(x, 0, f, fprime, fsecond), color='red', label='quadratic approximation')
ax.set_ylim([-2,3])
ax.axhline(y=0, color='k')
ax.axvline(x=0, color='k')
plt.legend()
def newton(x0, fprime, fsecond, maxiter=100, eps=0.0001):
x=x0
for i in range(maxiter):
xnew=x-(fprime(x)/fsecond(x))
        if abs(xnew - x) < eps:
            print('converged')
            return xnew
x = xnew
return x
x_star=newton(0, fprime, fsecond)
fig, ax = plt.subplots()
ax.plot(x, f(x), label='target')
ax.grid()
ax.plot(x, quadratic_approx(x, x_star , f, fprime, fsecond), color='red', label='quadratic approximation')
ax.set_ylim([-2,3])
ax.axhline(y=0, color='k')
ax.axvline(x=0, color='k')
ax.axvline(x = x_star, color='green')
plt.legend()
#<NAME>: See: https://medium.com/swlh/optimization-algorithms-the-newton-method-4bc6728fb3b6
# Newton Minimizing Algorithm
def f(x):
return -x**3+6*x**2-4*x-2
x = np.linspace(-1, 1)
fig, ax = plt.subplots()
ax.plot(x, f(x), label='target')
ax.grid()
def fprime(x):
return -3*x**2+12*x-4
def fsecond(x):
return -6*x + 12
def quadratic_approx(x, x0, f, fprime, fsecond):
return f(x0)+fprime(x0)*(x-x0)+0.5*fsecond(x0)*(x-x0)**2
x = np.linspace(-1, 1)
fig, ax = plt.subplots()
ax.plot(x, f(x), label='target')
ax.grid()
ax.plot(x, quadratic_approx(x, 0, f, fprime, fsecond), color='red', label='quadratic approximation')
ax.set_ylim([-5,3])
ax.axhline(y=0, color='k')
ax.axvline(x=0, color='k')
plt.legend()
def newton(x0, fprime, fsecond, maxiter=100, eps=0.0001):
x=x0
for i in range(maxiter):
xnew=x-(fprime(x)/fsecond(x))
        if abs(xnew - x) < eps:
            print('converged')
            return xnew
x = xnew
return x
x_star=newton(0, fprime, fsecond)
fig, ax = plt.subplots()
ax.plot(x, f(x), label='target')
ax.grid()
ax.plot(x, quadratic_approx(x, x_star , f, fprime, fsecond), color='red', label='quadratic approximation')
ax.set_ylim([-5,3])
ax.axhline(y=0, color='k')
ax.axvline(x=0, color='k')
ax.axvline(x = x_star, color='green')
plt.legend()
#Valentina Alto: See: https://medium.com/swlh/optimization-algorithms-the-newton-method-4bc6728fb3b6
# Gradient Descent vs Newton Unconstrained Multivariate Optimisation
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
import numpy as np
from mpl_toolkits import mplot3d
def Rosenbrock(x,y):
    return (1 - x)**2 + 100*(y - x**2)**2
def Grad_Rosenbrock(x,y):
g1 = -400*x*y + 400*x**3 + 2*x -2
g2 = 200*y -200*x**2
return np.array([g1,g2])
def Hessian_Rosenbrock(x,y):
h11 = -400*y + 1200*x**2 + 2
h12 = -400 * x
h21 = -400 * x
h22 = 200
return np.array([[h11,h12],[h21,h22]])
def Gradient_Descent(Grad,x,y, gamma = 0.00125, epsilon=0.0001, nMax = 10000 ):
#Initialization
i = 0
    iter_x, iter_y, iter_count = np.empty(0), np.empty(0), np.empty(0)
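    # The original file is truncated at this point; the loop below is a minimal
    # sketch (an assumption, not the original code) of how the descent could
    # proceed: step along -Grad with step size gamma until the update is smaller
    # than epsilon or nMax iterations are reached.
    error = 10 * epsilon
    while error > epsilon and i < nMax:
        i += 1
        iter_x = np.append(iter_x, x)
        iter_y = np.append(iter_y, y)
        iter_count = np.append(iter_count, i)
        x_prev, y_prev = x, y
        g = Grad(x_prev, y_prev)
        x = x_prev - gamma * g[0]
        y = y_prev - gamma * g[1]
        error = np.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2)
    return x, y, iter_x, iter_y, iter_count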
import rdkit
from rdkit.Chem import rdMolTransforms
from rdkit.Chem import TorsionFingerprints
import numpy as np
import networkx as nx
import random
atomTypes = ['H', 'C', 'B', 'N', 'O', 'F', 'Si', 'P', 'S', 'Cl', 'Br', 'I']
formalCharge = [-1, -2, 1, 2, 0]
degree = [0, 1, 2, 3, 4, 5, 6]
num_Hs = [0, 1, 2, 3, 4]
local_chiral_tags = [0, 1, 2, 3]
hybridization = [
rdkit.Chem.rdchem.HybridizationType.S,
rdkit.Chem.rdchem.HybridizationType.SP,
rdkit.Chem.rdchem.HybridizationType.SP2,
rdkit.Chem.rdchem.HybridizationType.SP3,
rdkit.Chem.rdchem.HybridizationType.SP3D,
rdkit.Chem.rdchem.HybridizationType.SP3D2,
rdkit.Chem.rdchem.HybridizationType.UNSPECIFIED,
]
bondTypes = ['SINGLE', 'DOUBLE', 'TRIPLE', 'AROMATIC']
def one_hot_embedding(value, options):
embedding = [0]*(len(options) + 1)
index = options.index(value) if value in options else -1
embedding[index] = 1
return embedding
def adjacency_to_undirected_edge_index(adj):
adj = np.triu(np.array(adj, dtype = int)) #keeping just upper triangular entries from sym matrix
    array_adj = np.array(np.nonzero(adj))
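    # The original file is truncated here; the remainder is a sketch under the
    # assumption that the function returns an undirected edge_index of shape
    # (2, 2*E), listing each upper-triangular edge in both directions.
    edge_index = np.zeros((2, 2 * array_adj.shape[1]), dtype=int)
    edge_index[:, ::2] = array_adj
    edge_index[:, 1::2] = np.flipud(array_adj)
    return edge_index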
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 15:30:15 2021
@author: altair
"""
import math
import pandas_datareader as web
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
import matplotlib.pyplot as plt
df = web.DataReader('AAPL', data_source= 'yahoo', start= '2015-01-01', end= '2020-12-31')
print(df)
print(df.shape)
# visualize closing price
plt.figure(figsize=(16,8))
plt.title('Close Price History', fontsize= 18)
plt.plot(df['Close'])
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price USD', fontsize= 18)
plt.show()
# create new dataframe with close column
data = df.filter(['Close'])
# convert the dataframe to a numpy array
dataset = data.values
# convert the number of rows to train the model on
training_data_len = math.ceil(len(dataset) * 0.8)
print('\n training_data_len:',training_data_len)
#scale the data
scaler = MinMaxScaler(feature_range= (0, 1))
scaled_data = scaler.fit_transform(dataset)
print('\nscaled_data', scaled_data)
# create the training data set, scaled training dataset
train_data = scaled_data[0:training_data_len, :]
#split the daa into x_train and y_train dataset
x_train = []
y_train = []
for i in range(60, len(train_data)):
x_train.append(train_data[i-60:i, 0])
y_train.append(train_data[i,0])
if i <= 60:
print(x_train)
print(y_train)
print()
# convert x_train and y_train to numpy arrays
x_train, y_train = np.array(x_train), np.array(y_train)
#reshape the data
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
print('\nx_train reshape:',x_train.shape)
# build the LSTM model
model = Sequential()
model.add(LSTM(50, return_sequences= True, input_shape= (x_train.shape[1],1)))
model.add(LSTM(50,return_sequences= False))
model.add(Dense(25))
model.add(Dense(1))
# compile the model
model.compile(optimizer='adam', loss= 'mean_squared_error')
#train the model
model.fit(x_train, y_train, batch_size= 1, epochs= 1)
# create the testing data, create dataset x_test, y_test
test_data = scaled_data[training_data_len - 60: , :]
x_test = []
y_test = dataset[training_data_len:, :]
for i in range(60, len(test_data)):
x_test.append(test_data[i-60:i, 0])
# convert the data to a numpy array
x_test = np.array(x_test)
# reshape the data
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1],1))
# get the models predicted price values
predictions = model.predict(x_test)
predictions = scaler.inverse_transform(predictions)
# rmse
rmse = np.sqrt(np.mean((predictions - y_test) ** 2))
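# The script is truncated here; an illustrative continuation (an assumption, not
# part of the original) that reports the error and visualises the fit on the
# hold-out period.
print('RMSE:', rmse)
train = data[:training_data_len]
valid = data[training_data_len:].copy()
valid['Predictions'] = predictions.flatten()
plt.figure(figsize=(16, 8))
plt.title('Model predictions vs. actual close price', fontsize=18)
plt.plot(train['Close'])
plt.plot(valid[['Close', 'Predictions']])
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price USD', fontsize=18)
plt.legend(['Train', 'Actual', 'Predictions'])
plt.show()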
"""This module contains code for the cosmic ray monitor Bokeh plots.
Author
------
- <NAME>
Use
---
This module can be used from the command line as such:
::
"""
import os
from astropy.io import fits
from astropy.time import Time
import datetime
import numpy as np
import matplotlib.pyplot as plt
from jwql.database.database_interface import session
from jwql.database.database_interface import MIRICosmicRayQueryHistory, MIRICosmicRayStats
from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE
from jwql.utils.utils import get_config, filesystem_path
from jwql.bokeh_templating import BokehTemplate
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
class CosmicRayMonitor(BokehTemplate):
# Combine instrument and aperture into a single property because we
# do not want to invoke the setter unless both are updated
@property
def aperture_info(self):
return (self._instrument, self._aperture)
@aperture_info.setter
def aperture_info(self, info):
self._instrument, self._aperture = info
self.pre_init()
self.post_init()
def pre_init(self):
# Start with default values for instrument and aperture because
# BokehTemplate's __init__ method does not allow input arguments
try:
dummy_instrument = self._instrument
dummy_aperture = self._aperture
except AttributeError:
self._instrument = 'MIRI'
self._aperture = 'MIRIM_FULL'
self._embed = True
# App design
self.format_string = None
self.interface_file = os.path.join(SCRIPT_DIR, "yaml", "cosmic_ray_monitor_interface.yaml")
# Load data tables
self.load_data()
self.get_history_data()
# Get dates and coordinates of the most recent entries
#self.most_recent_data()
def post_init(self):
self._update_cosmic_ray_v_time()
self._update_cosmic_ray_histogram()
def get_histogram_data(self):
"""Get data required to create cosmic ray histogram from the
database query.
"""
last_hist_index = -1
hist = plt.hist(self.mags[last_hist_index])
self.bin_left = np.array( [bar.get_x() for bar in hist[2]] )
self.amplitude = [bar.get_height() for bar in hist[2]]
self.bottom = [bar.get_y() for bar in hist[2]]
deltas = self.bin_left[1:] - self.bin_left[0: -1]
        self.bin_width = np.append(deltas[0], deltas)
import numpy as np
import cv2
import gym
import ray
def collect_trajectories(env, params):
total_steps = params['total_steps']
steps_per_episode = params['steps_per_episode']
max_action = np.array(params['max_action'])
mode = params['mode']
states = []
actions = []
episode_step_count = 0
total_step_count = 0
obs = env.reset()
if mode == 'image':
states.append([cv2.resize(obs, dsize=(64, 64)) / 255])
elif mode == 'raw':
states.append([env.unwrapped.state])
else:
states.append([obs])
actions.append([])
while total_step_count < total_steps:
action = env.action_space.sample()
actions[-1].append(np.array(action) / max_action)
obs, reward, done, extra = env.step(action)
if mode == 'image':
states[-1].append(cv2.resize(obs, dsize=(64, 64)) / 255)
elif mode == 'raw':
states[-1].append(env.unwrapped.state)
else:
states[-1].append(obs)
episode_step_count += 1
total_step_count += 1
if done or episode_step_count > steps_per_episode:
obs = env.reset()
if mode == 'image':
states.append([cv2.resize(obs, dsize=(64, 64)) / 255])
elif mode == 'raw':
states.append([env.unwrapped.state])
else:
states.append([obs])
actions.append([])
episode_step_count = 0
return states, actions
def collect_local_input(env, params):
starting_state = params['starting_state']
max_action = np.array(params['max_action'])
num_steps_observation = params['num_steps_observation']
T = params['T']
num_samples = params['num_samples']
obs = []
action_chains = []
obs_t = []
for i in range(num_samples):
env.reset()
env.unwrapped.state = starting_state
complete_list = [cv2.resize(env.unwrapped.get_obs(), dsize=(64, 64)) / 255]
action_list = []
for t in range(T + num_steps_observation - 1):
action = env.action_space.sample()
o, reward, done, extra = env.step(action)
complete_list.append(cv2.resize(o, dsize=(64, 64)) / 255)
action_list.append(action / max_action)
obs_list = complete_list[:num_steps_observation]
obs_t_list = complete_list[T:]
action_list = action_list[:T]
obs.append(np.concatenate(obs_list, axis=2))
action_chains.append(np.concatenate(action_list, axis=0))
obs_t.append(np.concatenate(obs_t_list, axis=2))
data = {'obs': np.array(obs),
'actions': np.array(action_chains),
'obs_t': np.array(obs_t)}
return data
def collect_raw_local_input(env, params):
starting_state = params['starting_state']
max_action = np.array(params['max_action'])
num_steps_observation = params['num_steps_observation']
T = params['T']
num_samples = params['num_samples']
obs = []
action_chains = []
obs_t = []
for i in range(num_samples):
env.reset()
env.unwrapped.state = starting_state
complete_list = [env.unwrapped.state]
action_list = []
for t in range(T + num_steps_observation - 1):
action = env.action_space.sample()
env.step(action)
complete_list.append(env.unwrapped.state)
action_list.append(action / max_action)
obs_list = complete_list[:num_steps_observation]
obs_t_list = complete_list[T:]
action_list = action_list[:T]
obs.append(np.concatenate(obs_list, axis=0))
action_chains.append(np.concatenate(action_list, axis=0))
obs_t.append(np.concatenate(obs_t_list, axis=0))
data = {'obs': np.array(obs),
'actions': np.array(action_chains),
'obs_t': np.array(obs_t)}
return data
@ray.remote
def ray_mj_raw_local_input(params):
return mj_raw_local_input(params)
def mj_raw_local_input(params):
env_name = params['env_name']
starting_state = params['starting_state']
max_action = np.array(params['max_action'])
num_steps_observation = params['num_steps_observation']
T = params['T']
num_samples = params['num_samples']
env = gym.make(env_name)
action_chains = []
obs_t = []
for i in range(num_samples):
complete_list = [env.reset()]
env.sim.set_state(starting_state)
action_list = []
for t in range(T + num_steps_observation - 1):
action = env.action_space.sample()
complete_list.append(env.step(action)[0])
action_list.append(action / max_action)
obs_t_list = complete_list[T:]
action_list = action_list[:T]
action_chains.append(np.concatenate(action_list, axis=0))
obs_t.append(np.concatenate(obs_t_list, axis=0))
data = {'actions': np.array(action_chains),
            'obs_t': np.array(obs_t)}
    return data
import numpy as np
from scipy import sparse
from mm2d import util
import qpoases
import IPython
# mpc parameters
NUM_WSR = 100 # number of working set recalculations
NUM_ITER = 3 # number of linearizations/iterations
# TODO experimental MPC controller that uses the SQP controller under the hood
# - is there is a significant penalty or wrapping things up as Python functions
# rather than directly as arrays?
# - ideally, we'd make a library of objectives, bounds, and constraints that
# could be put together for different behaviours
class TrackingMPC:
def __init__(self, model, dt, Q, R, num_horizon):
self.model = model
self.dt = dt
self.Q = Q
self.R = R
self.num_horizon = num_horizon
ni = self.model.ni
nv = num_horizon * ni
# setup SQP values
bounds = sqp.Bounds(-model.vel_lim*np.ones(nv), model.vel_lim*np.ones(nv))
def obj_val(x0, xd, var):
q = x0
J = 0
for k in range(num_horizon):
u = var[k*ni:(k+1)*ni] # TODO would be nicer if var was 2D
q = q + dt * u
p = model.forward(q)
J += 0.5 * (p @ Q @ p + u @ R @ u)
return J
class MPC(object):
''' Model predictive controller. '''
def __init__(self, model, dt, Q, R, vel_lim, acc_lim):
self.model = model
self.dt = dt
self.Q = Q
self.R = R
self.vel_lim = vel_lim
self.acc_lim = acc_lim
def _lookahead(self, q0, pr, u, N):
        ''' Generate lifted matrices propagating the state N timesteps into the
future. '''
ni = self.model.ni # number of joints
no = self.model.no # number of Cartesian outputs
fbar = np.zeros(no*N) # Lifted forward kinematics
Jbar = np.zeros((no*N, ni*N)) # Lifted Jacobian
Qbar = np.kron(np.eye(N), self.Q)
Rbar = np.kron(np.eye(N), self.R)
# lower triangular matrix of ni*ni identity matrices
Ebar = np.kron(np.tril(np.ones((N, N))), np.eye(ni))
# Integrate joint positions from the last iteration
qbar = np.tile(q0, N+1)
qbar[ni:] = qbar[ni:] + self.dt * Ebar.dot(u)
for k in range(N):
q = qbar[(k+1)*ni:(k+2)*ni]
p = self.model.forward(q)
J = self.model.jacobian(q)
fbar[k*no:(k+1)*no] = p
Jbar[k*no:(k+1)*no, k*ni:(k+1)*ni] = J
dbar = fbar - pr
H = Rbar + self.dt**2*Ebar.T.dot(Jbar.T).dot(Qbar).dot(Jbar).dot(Ebar)
g = u.T.dot(Rbar) + self.dt*dbar.T.dot(Qbar).dot(Jbar).dot(Ebar)
return H, g
def _calc_vel_limits(self, u, ni, N):
L = np.ones(ni * N) * self.vel_lim
lb = -L - u
ub = L - u
return lb, ub
def _calc_acc_limits(self, u, dq0, ni, N):
# u_prev consists of [dq0, u_0, u_1, ..., u_{N-2}]
# u is [u_0, ..., u_{N-1}]
u_prev = np.zeros(ni * N)
u_prev[:ni] = dq0
u_prev[ni:] = u[:-ni]
L = self.dt * np.ones(ni * N) * self.acc_lim
lbA = -L - u + u_prev
ubA = L - u + u_prev
d1 = np.ones(N)
d2 = -np.ones(N - 1)
# A0 is NxN
A0 = sparse.diags((d1, d2), [0, -1]).toarray()
# kron to make it work for n-dimensional inputs
A = np.kron(A0, np.eye(ni))
return A, lbA, ubA
def _iterate(self, q0, dq0, pr, u, N):
ni = self.model.ni
# Create the QP, which we'll solve sequentially.
# num vars, num constraints (note that constraints only refer to matrix
# constraints rather than bounds)
# num constraints = ni*N joint acceleration constraints
num_var = ni * N
num_constraints = ni * N
qp = qpoases.PySQProblem(num_var, num_constraints)
options = qpoases.PyOptions()
options.printLevel = qpoases.PyPrintLevel.NONE
qp.setOptions(options)
# Initial opt problem.
H, g = self._lookahead(q0, pr, u, N)
lb, ub = self._calc_vel_limits(u, ni, N)
A, lbA, ubA = self._calc_acc_limits(u, dq0, ni, N)
ret = qp.init(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
delta = np.zeros(ni * N)
qp.getPrimalSolution(delta)
u = u + delta
# Remaining sequence is hotstarted from the first.
for i in range(NUM_ITER - 1):
H, g = self._lookahead(q0, pr, u, N)
lb, ub = self._calc_vel_limits(u, ni, N)
A, lbA, ubA = self._calc_acc_limits(u, dq0, ni, N)
qp.hotstart(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
qp.getPrimalSolution(delta)
u = u + delta
return u
def solve(self, q0, dq0, pr, N):
''' Solve the MPC problem at current state x0 given desired output
trajectory Yd. '''
# initialize optimal inputs
u = np.zeros(self.model.ni * N)
# iterate to final solution
u = self._iterate(q0, dq0, pr, u, N)
# return first optimal input
return u[:self.model.ni]
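# Illustrative usage of the MPC class above (not part of the original file).
# It assumes a `model` object exposing `ni`, `no`, `forward(q)` and
# `jacobian(q)`, which is the interface _lookahead relies on:
#
#     controller = MPC(model, dt=0.1, Q=np.eye(model.no), R=0.01*np.eye(model.ni),
#                      vel_lim=1.0, acc_lim=2.0)
#     u = controller.solve(q0, dq0, pr, N=10)  # pr stacks the desired outputs, length model.no*N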
class ObstacleAvoidingMPC(object):
''' Model predictive controller with obstacle avoidance. '''
def __init__(self, model, dt, Q, R, vel_lim, acc_lim):
self.model = model
self.dt = dt
self.Q = Q
self.R = R
self.vel_lim = vel_lim
self.acc_lim = acc_lim
def _lookahead(self, q0, pr, u, N, pc):
        ''' Generate lifted matrices propagating the state N timesteps into the
future. '''
ni = self.model.ni # number of joints
no = self.model.no # number of Cartesian outputs
fbar = np.zeros(no*N) # Lifted forward kinematics
Jbar = np.zeros((no*N, ni*N)) # Lifted Jacobian
Qbar = np.kron(np.eye(N), self.Q)
Rbar = np.kron(np.eye(N), self.R)
# lower triangular matrix of ni*ni identity matrices
Ebar = np.kron(np.tril(np.ones((N, N))), np.eye(ni))
# Integrate joint positions from the last iteration
qbar = np.tile(q0, N+1)
qbar[ni:] = qbar[ni:] + self.dt * Ebar.dot(u)
num_body_pts = 2
Abar = np.zeros((N*num_body_pts, ni*N))
lbA = np.zeros(N*num_body_pts)
for k in range(N):
q = qbar[(k+1)*ni:(k+2)*ni]
p = self.model.forward(q)
J = self.model.jacobian(q)
fbar[k*no:(k+1)*no] = p
Jbar[k*no:(k+1)*no, k*ni:(k+1)*ni] = J
# TODO hardcoded radius
# EE and obstacle
obs_radius = 0.6
d_ee_obs = np.linalg.norm(p - pc) - obs_radius
Abar[k*num_body_pts, k*ni:(k+1)*ni] = (p - pc).T.dot(J) / np.linalg.norm(p - pc)
lbA[k*num_body_pts] = -d_ee_obs
# base and obstacle
pb = q[:2]
Jb = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0]])
d_base_obs = np.linalg.norm(pb - pc) - obs_radius - 0.56
Abar[k*num_body_pts+1, k*ni:(k+1)*ni] = (pb - pc).T.dot(Jb) / np.linalg.norm(pb - pc)
lbA[k*num_body_pts+1] = -d_base_obs
dbar = fbar - pr
H = Rbar + self.dt**2*Ebar.T.dot(Jbar.T).dot(Qbar).dot(Jbar).dot(Ebar)
g = u.T.dot(Rbar) + self.dt*dbar.T.dot(Qbar).dot(Jbar).dot(Ebar)
A = self.dt*Abar.dot(Ebar)
return H, g, A, lbA
def _calc_vel_limits(self, u, ni, N):
L = np.ones(ni * N) * self.vel_lim
lb = -L - u
ub = L - u
return lb, ub
def _calc_acc_limits(self, u, dq0, ni, N):
# u_prev consists of [dq0, u_0, u_1, ..., u_{N-2}]
# u is [u_0, ..., u_{N-1}]
u_prev = np.zeros(ni * N)
u_prev[:ni] = dq0
u_prev[ni:] = u[:-ni]
L = self.dt * np.ones(ni * N) * self.acc_lim
lbA = -L - u + u_prev
ubA = L - u + u_prev
d1 = np.ones(N)
d2 = -np.ones(N - 1)
# A0 is NxN
A0 = sparse.diags((d1, d2), [0, -1]).toarray()
# kron to make it work for n-dimensional inputs
A = np.kron(A0, np.eye(ni))
return A, lbA, ubA
def _iterate(self, q0, dq0, pr, u, N, pc):
ni = self.model.ni
# Create the QP, which we'll solve sequentially.
# num vars, num constraints (note that constraints only refer to matrix
# constraints rather than bounds)
# num constraints = N obstacle constraints and ni*N joint acceleration
# constraints
num_var = ni * N
num_constraints = 2*N + ni * N
qp = qpoases.PySQProblem(num_var, num_constraints)
options = qpoases.PyOptions()
options.printLevel = qpoases.PyPrintLevel.NONE
qp.setOptions(options)
# Initial opt problem.
H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N, pc)
ubA_obs = np.infty * np.ones_like(lbA_obs)
lb, ub = self._calc_vel_limits(u, ni, N)
A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
A = np.vstack((A_obs, A_acc))
lbA = np.concatenate((lbA_obs, lbA_acc))
ubA = np.concatenate((ubA_obs, ubA_acc))
ret = qp.init(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
delta = np.zeros(ni * N)
qp.getPrimalSolution(delta)
u = u + delta
# Remaining sequence is hotstarted from the first.
for i in range(NUM_ITER - 1):
H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N, pc)
lb, ub = self._calc_vel_limits(u, ni, N)
A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
A = np.vstack((A_obs, A_acc))
lbA = np.concatenate((lbA_obs, lbA_acc))
ubA = np.concatenate((ubA_obs, ubA_acc))
qp.hotstart(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
qp.getPrimalSolution(delta)
u = u + delta
return u
def solve(self, q0, dq0, pr, N, pc):
''' Solve the MPC problem at current state x0 given desired output
trajectory Yd. '''
# initialize optimal inputs
u = np.zeros(self.model.ni * N)
# iterate to final solution
u = self._iterate(q0, dq0, pr, u, N, pc)
# return first optimal input
return u[:self.model.ni]
class ObstacleAvoidingMPC2(object):
''' Model predictive controller. '''
def __init__(self, model, dt, Q, R, vel_lim, acc_lim):
self.model = model
self.dt = dt
self.Q = Q
self.R = R
self.vel_lim = vel_lim
self.acc_lim = acc_lim
def _lookahead(self, q0, pr, u, N, pc):
        ''' Generate lifted matrices propagating the state N timesteps into the
future. '''
ni = self.model.ni # number of joints
no = self.model.no # number of Cartesian outputs
fbar = np.zeros(no*N) # Lifted forward kinematics
Jbar = np.zeros((no*N, ni*N)) # Lifted Jacobian
Qbar = np.kron(np.eye(N), self.Q)
Rbar = np.kron(np.eye(N), self.R)
# lower triangular matrix of ni*ni identity matrices
Ebar = np.kron(np.tril(np.ones((N, N))), np.eye(ni))
# Integrate joint positions from the last iteration
qbar = np.tile(q0, N+1)
qbar[ni:] = qbar[ni:] + self.dt * Ebar.dot(u)
num_body_pts = 2+1
Abar = np.zeros((N*num_body_pts, ni*N))
lbA = np.zeros(N*num_body_pts)
for k in range(N):
q = qbar[(k+1)*ni:(k+2)*ni]
p = self.model.forward(q)
J = self.model.jacobian(q)
pm = self.model.forward_m(q)
Jm = self.model.jacobian_m(q)
fbar[k*no:(k+1)*no] = pm
Jbar[k*no:(k+1)*no, k*ni:(k+1)*ni] = Jm
# TODO hardcoded radius
# EE and obstacle
d_ee_obs = np.linalg.norm(p - pc) - 0.5
Abar[k*num_body_pts, k*ni:(k+1)*ni] = (p - pc).T.dot(J) / np.linalg.norm(p - pc)
lbA[k*num_body_pts] = -d_ee_obs
# base and obstacle
pb = q[:2]
Jb = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0]])
d_base_obs = np.linalg.norm(pb - pc) - 0.5 - 0.56
Abar[k*num_body_pts+1, k*ni:(k+1)*ni] = (pb - pc).T.dot(Jb) / np.linalg.norm(pb - pc)
lbA[k*num_body_pts+1] = -d_base_obs
# pf and ee: these need to stay close together
pf = self.model.forward_f(q)
Jf = self.model.jacobian_f(q)
d_pf_ee = np.linalg.norm(p - pf)
A_pf_ee = -(pf - p).T.dot(Jf - J) / d_pf_ee
lbA_pf_ee = d_pf_ee - 0.75
Abar[k*num_body_pts+2, k*ni:(k+1)*ni] = A_pf_ee
lbA[k*num_body_pts+2] = lbA_pf_ee
dbar = fbar - pr
H = Rbar + self.dt**2*Ebar.T.dot(Jbar.T).dot(Qbar).dot(Jbar).dot(Ebar)
g = u.T.dot(Rbar) + self.dt*dbar.T.dot(Qbar).dot(Jbar).dot(Ebar)
A = self.dt*Abar.dot(Ebar)
return H, g, A, lbA
def _calc_vel_limits(self, u, ni, N):
L = np.ones(ni * N) * self.vel_lim
lb = -L - u
ub = L - u
return lb, ub
def _calc_acc_limits(self, u, dq0, ni, N):
# u_prev consists of [dq0, u_0, u_1, ..., u_{N-2}]
# u is [u_0, ..., u_{N-1}]
u_prev = np.zeros(ni * N)
u_prev[:ni] = dq0
u_prev[ni:] = u[:-ni]
L = self.dt * np.ones(ni * N) * self.acc_lim
lbA = -L - u + u_prev
ubA = L - u + u_prev
d1 = np.ones(N)
d2 = -np.ones(N - 1)
# A0 is NxN
A0 = sparse.diags((d1, d2), [0, -1]).toarray()
# kron to make it work for n-dimensional inputs
A = np.kron(A0, np.eye(ni))
return A, lbA, ubA
def _iterate(self, q0, dq0, pr, u, N, pc):
ni = self.model.ni
# Create the QP, which we'll solve sequentially.
# num vars, num constraints (note that constraints only refer to matrix
# constraints rather than bounds)
# num constraints = N obstacle constraints and ni*N joint acceleration
# constraints
num_var = ni * N
num_constraints = 3*N + ni * N
qp = qpoases.PySQProblem(num_var, num_constraints)
options = qpoases.PyOptions()
options.printLevel = qpoases.PyPrintLevel.NONE
qp.setOptions(options)
# Initial opt problem.
H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N, pc)
ubA_obs = np.infty * np.ones_like(lbA_obs)
lb, ub = self._calc_vel_limits(u, ni, N)
A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
A = np.vstack((A_obs, A_acc))
lbA = np.concatenate((lbA_obs, lbA_acc))
ubA = np.concatenate((ubA_obs, ubA_acc))
ret = qp.init(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
delta = np.zeros(ni * N)
qp.getPrimalSolution(delta)
u = u + delta
# Remaining sequence is hotstarted from the first.
for i in range(NUM_ITER - 1):
H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N, pc)
lb, ub = self._calc_vel_limits(u, ni, N)
A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
A = np.vstack((A_obs, A_acc))
lbA = np.concatenate((lbA_obs, lbA_acc))
ubA = np.concatenate((ubA_obs, ubA_acc))
qp.hotstart(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
qp.getPrimalSolution(delta)
u = u + delta
return u
def solve(self, q0, dq0, pr, N, pc):
''' Solve the MPC problem at current state x0 given desired output
trajectory Yd. '''
# initialize optimal inputs
u = np.zeros(self.model.ni * N)
# iterate to final solution
u = self._iterate(q0, dq0, pr, u, N, pc)
# return first optimal input
return u[:self.model.ni]
# class MPC2(object):
# ''' Model predictive controller. '''
# def __init__(self, model, dt, Q, R, vel_lim, acc_lim):
# self.model = model
# self.dt = dt
# self.Q = Q
# self.R = R
# self.vel_lim = vel_lim
# self.acc_lim = acc_lim
#
# def _lookahead(self, q0, pr, u, N):
# ''' Generate lifted matrices propagating the state N timesteps into the
# future. '''
# ni = self.model.ni # number of joints
# no = self.model.no # number of Cartesian outputs
#
# fbar = np.zeros(no*N) # Lifted forward kinematics
# Jbar = np.zeros((no*N, ni*N)) # Lifted Jacobian
# Qbar = np.kron(np.eye(N), self.Q)
# Rbar = np.kron(np.eye(N), self.R)
#
# # lower triangular matrix of ni*ni identity matrices
# Ebar = np.kron(np.tril(np.ones((N, N))), np.eye(ni))
#
# # Integrate joint positions from the last iteration
# qbar = np.tile(q0, N+1)
# qbar[ni:] = qbar[ni:] + self.dt * Ebar.dot(u)
#
# num_body_pts = 1
# Abar = np.zeros((N*num_body_pts, ni*N))
# lbA = np.zeros(N*num_body_pts)
#
# for k in range(N):
# q = qbar[(k+1)*ni:(k+2)*ni]
# p = self.model.forward(q)
# J = self.model.jacobian(q)
#
# pm = self.model.forward_m(q)
# Jm = self.model.jacobian_m(q)
#
# fbar[k*no:(k+1)*no] = pm
# Jbar[k*no:(k+1)*no, k*ni:(k+1)*ni] = Jm
#
# # pf and ee
# pf = self.model.forward_f(q)
# Jf = self.model.jacobian_f(q)
# d_pf_ee = np.linalg.norm(p - pf)
# A_pf_ee = -(pf - p).T.dot(Jf - J) / d_pf_ee
# lbA_pf_ee = d_pf_ee - 0.75
# Abar[k*num_body_pts, k*ni:(k+1)*ni] = A_pf_ee
# lbA[k*num_body_pts] = lbA_pf_ee
#
# dbar = fbar - pr
#
# H = Rbar + self.dt**2*Ebar.T.dot(Jbar.T).dot(Qbar).dot(Jbar).dot(Ebar)
# g = u.T.dot(Rbar) + self.dt*dbar.T.dot(Qbar).dot(Jbar).dot(Ebar)
# A = self.dt*Abar.dot(Ebar)
#
# return H, g, A, lbA
#
# def _calc_vel_limits(self, u, ni, N):
# L = np.ones(ni * N) * self.vel_lim
# lb = -L - u
# ub = L - u
# return lb, ub
#
# def _calc_acc_limits(self, u, dq0, ni, N):
# # u_prev consists of [dq0, u_0, u_1, ..., u_{N-2}]
# # u is [u_0, ..., u_{N-1}]
# u_prev = np.zeros(ni * N)
# u_prev[:ni] = dq0
# u_prev[ni:] = u[:-ni]
#
# L = self.dt * np.ones(ni * N) * self.acc_lim
# lbA = -L - u + u_prev
# ubA = L - u + u_prev
#
# d1 = np.ones(N)
# d2 = -np.ones(N - 1)
#
# # A0 is NxN
# A0 = sparse.diags((d1, d2), [0, -1]).toarray()
#
# # kron to make it work for n-dimensional inputs
# A = np.kron(A0, np.eye(ni))
#
# return A, lbA, ubA
#
# def _iterate(self, q0, dq0, pr, u, N):
# ni = self.model.ni
#
# # Create the QP, which we'll solve sequentially.
# # num vars, num constraints (note that constraints only refer to matrix
# # constraints rather than bounds)
# # num constraints = N obstacle constraints and ni*N joint acceleration
# # constraints
# num_var = ni * N
# num_constraints = N + ni * N
# qp = qpoases.PySQProblem(num_var, num_constraints)
# options = qpoases.PyOptions()
# options.printLevel = qpoases.PyPrintLevel.NONE
# qp.setOptions(options)
#
# # Initial opt problem.
# H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N)
# ubA_obs = np.infty * np.ones_like(lbA_obs)
#
# lb, ub = self._calc_vel_limits(u, ni, N)
# A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
#
# A = np.vstack((A_obs, A_acc))
# lbA = np.concatenate((lbA_obs, lbA_acc))
# ubA = np.concatenate((ubA_obs, ubA_acc))
#
# ret = qp.init(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
# delta = np.zeros(ni * N)
# qp.getPrimalSolution(delta)
# u = u + delta
#
# # Remaining sequence is hotstarted from the first.
# for i in range(NUM_ITER - 1):
# H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N)
# lb, ub = self._calc_vel_limits(u, ni, N)
# A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
# A = np.vstack((A_obs, A_acc))
# lbA = np.concatenate((lbA_obs, lbA_acc))
# ubA = np.concatenate((ubA_obs, ubA_acc))
#
# qp.hotstart(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
# qp.getPrimalSolution(delta)
#
# u = u + delta
#
# return u
#
# def solve(self, q0, dq0, pr, N):
# ''' Solve the MPC problem at current state x0 given desired output
# trajectory Yd. '''
# # initialize optimal inputs
# u = np.zeros(self.model.ni * N)
#
# # iterate to final solution
# u = self._iterate(q0, dq0, pr, u, N)
#
# # return first optimal input
# return u[:self.model.ni]
class MPC2(object):
''' Model predictive controller. '''
def __init__(self, model, dt, Q, R, vel_lim, acc_lim):
self.model = model
self.dt = dt
self.Q = Q
self.R = R
self.vel_lim = vel_lim
self.acc_lim = acc_lim
def _lookahead(self, q0, pr, u, N, pc):
        ''' Generate lifted matrices propagating the state N timesteps into the
future. '''
ni = self.model.ni # number of joints
no = self.model.no # number of Cartesian outputs
fbar = np.zeros(no*N) # Lifted forward kinematics
Jbar = np.zeros((no*N, ni*N)) # Lifted Jacobian
Qbar = np.kron(np.eye(N), self.Q)
Rbar = np.kron(np.eye(N), self.R)
# lower triangular matrix of ni*ni identity matrices
Ebar = np.kron(np.tril(np.ones((N, N))), np.eye(ni))
# Integrate joint positions from the last iteration
qbar = np.tile(q0, N+1)
qbar[ni:] = qbar[ni:] + self.dt * Ebar.dot(u)
num_body_pts = 2+1
Abar = np.zeros((N*num_body_pts, ni*N))
lbA = np.zeros(N*num_body_pts)
for k in range(N):
q = qbar[(k+1)*ni:(k+2)*ni]
p = self.model.forward(q)
J = self.model.jacobian(q)
pm = self.model.forward_m(q)
Jm = self.model.jacobian_m(q)
fbar[k*no:(k+1)*no] = pm
Jbar[k*no:(k+1)*no, k*ni:(k+1)*ni] = Jm
# TODO hardcoded radius
# EE and obstacle
d_ee_obs = np.linalg.norm(p - pc) - 0.45
Abar[k*num_body_pts, k*ni:(k+1)*ni] = (p - pc).T.dot(J) / np.linalg.norm(p - pc)
lbA[k*num_body_pts] = -d_ee_obs
# base and obstacle
pb = q[:2]
Jb = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0]])
d_base_obs = np.linalg.norm(pb - pc) - 0.45 - 0.56
Abar[k*num_body_pts+1, k*ni:(k+1)*ni] = (pb - pc).T.dot(Jb) / np.linalg.norm(pb - pc)
lbA[k*num_body_pts+1] = -d_base_obs
# pf and ee: these need to stay close together
pf = self.model.forward_f(q)
Jf = self.model.jacobian_f(q)
d_pf_ee = np.linalg.norm(p - pf)
A_pf_ee = -(pf - p).T.dot(Jf - J) / d_pf_ee
lbA_pf_ee = d_pf_ee - 0.75
Abar[k*num_body_pts+2, k*ni:(k+1)*ni] = A_pf_ee
lbA[k*num_body_pts+2] = lbA_pf_ee
dbar = fbar - pr
H = Rbar + self.dt**2*Ebar.T.dot(Jbar.T).dot(Qbar).dot(Jbar).dot(Ebar)
g = u.T.dot(Rbar) + self.dt*dbar.T.dot(Qbar).dot(Jbar).dot(Ebar)
A = self.dt*Abar.dot(Ebar)
return H, g, A, lbA
def _calc_vel_limits(self, u, ni, N):
L = np.ones(ni * N) * self.vel_lim
lb = -L - u
ub = L - u
return lb, ub
def _calc_acc_limits(self, u, dq0, ni, N):
# u_prev consists of [dq0, u_0, u_1, ..., u_{N-2}]
# u is [u_0, ..., u_{N-1}]
u_prev = np.zeros(ni * N)
u_prev[:ni] = dq0
u_prev[ni:] = u[:-ni]
L = self.dt * np.ones(ni * N) * self.acc_lim
lbA = -L - u + u_prev
ubA = L - u + u_prev
        d1 = np.ones(N)
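        # The file is truncated at this point; the lines below complete the
        # helper by mirroring the identical _calc_acc_limits implementations in
        # the MPC and ObstacleAvoidingMPC classes above.
        d2 = -np.ones(N - 1)
        # A0 is NxN
        A0 = sparse.diags((d1, d2), [0, -1]).toarray()
        # kron to make it work for n-dimensional inputs
        A = np.kron(A0, np.eye(ni))
        return A, lbA, ubA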
from worlds.cookbook import Cookbook
from misc import array
import curses
import logging
import numpy as np
from skimage.measure import block_reduce
import time
WIDTH = 10
HEIGHT = 10
WINDOW_WIDTH = 5
WINDOW_HEIGHT = 5
N_WORKSHOPS = 3
DOWN = 0
UP = 1
LEFT = 2
RIGHT = 3
USE = 4
N_ACTIONS = USE + 1
def random_free(grid, random):
pos = None
while pos is None:
(x, y) = (random.randint(WIDTH), random.randint(HEIGHT))
if grid[x, y, :].any():
continue
pos = (x, y)
return pos
def neighbors(pos, dir=None):
x, y = pos
neighbors = []
if x > 0 and (dir is None or dir == LEFT):
neighbors.append((x-1, y))
if y > 0 and (dir is None or dir == DOWN):
neighbors.append((x, y-1))
if x < WIDTH - 1 and (dir is None or dir == RIGHT):
neighbors.append((x+1, y))
if y < HEIGHT - 1 and (dir is None or dir == UP):
neighbors.append((x, y+1))
return neighbors
class CraftWorld(object):
def __init__(self, config):
self.cookbook = Cookbook(config.recipes)
self.n_features = \
2 * WINDOW_WIDTH * WINDOW_HEIGHT * self.cookbook.n_kinds + \
self.cookbook.n_kinds + \
4 + \
1
self.n_actions = N_ACTIONS
self.non_grabbable_indices = self.cookbook.environment
self.grabbable_indices = [i for i in range(self.cookbook.n_kinds)
if i not in self.non_grabbable_indices]
self.workshop_indices = [self.cookbook.index["workshop%d" % i]
for i in range(N_WORKSHOPS)]
self.water_index = self.cookbook.index["water"]
self.stone_index = self.cookbook.index["stone"]
self.random = np.random.RandomState(0)
def sample_scenario_with_goal(self, goal):
assert goal not in self.cookbook.environment
if goal in self.cookbook.primitives:
make_island = goal == self.cookbook.index["gold"]
make_cave = goal == self.cookbook.index["gem"]
return self.sample_scenario({goal: 1}, make_island=make_island,
make_cave=make_cave)
elif goal in self.cookbook.recipes:
ingredients = self.cookbook.primitives_for(goal)
return self.sample_scenario(ingredients)
else:
assert False, "don't know how to build a scenario for %s" % goal
def sample_scenario(self, ingredients, make_island=False, make_cave=False):
# generate grid
grid = np.zeros((WIDTH, HEIGHT, self.cookbook.n_kinds))
i_bd = self.cookbook.index["boundary"]
grid[0, :, i_bd] = 1
grid[WIDTH-1:, :, i_bd] = 1
grid[:, 0, i_bd] = 1
grid[:, HEIGHT-1:, i_bd] = 1
# treasure
if make_island or make_cave:
            (gx, gy) = (1 + np.random.randint(WIDTH-2), 1 + np.random.randint(HEIGHT-2))
import pytest
from numpy import allclose, arange, array, asarray, dot, cov, corrcoef, float64
from thunder.series.readers import fromlist, fromarray
from thunder.images.readers import fromlist as img_fromlist
pytestmark = pytest.mark.usefixtures("eng")
def test_map(eng):
data = fromlist([array([1, 2]), array([3, 4])], engine=eng)
assert allclose(data.map(lambda x: x + 1).toarray(), [[2, 3], [4, 5]])
assert data.map(lambda x: 1.0*x, dtype=float64).dtype == float64
assert data.map(lambda x: 1.0*x).dtype == float64
def test_map_singletons(eng):
data = fromlist([array([4, 5, 6, 7]), array([8, 9, 10, 11])], engine=eng)
mapped = data.map(lambda x: x.mean())
assert mapped.shape == (2, 1)
def test_filter(eng):
data = fromlist([array([1, 2]), array([3, 4])], engine=eng)
assert allclose(data.filter(lambda x: x.sum() > 3).toarray(), [3, 4])
def test_flatten(eng):
arr = arange(2*2*5).reshape(2, 2, 5)
data = fromarray(arr, engine=eng)
assert data.flatten().shape == (4, 5)
assert allclose(data.flatten().toarray(), arr.reshape(2*2, 5))
def test_sample(eng):
data = fromlist([array([1, 5]), array([1, 10]), array([1, 15])], engine=eng)
assert allclose(data.sample(3).shape, (3, 2))
assert allclose(data.filter(lambda x: x.max() > 10).sample(1).toarray(), [1, 15])
def test_between(eng):
data = fromlist([array([4, 5, 6, 7]), array([8, 9, 10, 11])], engine=eng)
val = data.between(0, 2)
assert allclose(val.index, array([0, 1]))
assert allclose(val.toarray(), array([[4, 5], [8, 9]]))
def test_first(eng):
data = fromlist([array([4, 5, 6, 7]), array([8, 9, 10, 11])], engine=eng)
assert allclose(data.first(), [4, 5, 6, 7])
def test_select(eng):
index = ['label1', 'label2', 'label3', 'label4']
data = fromlist([array([4, 5, 6, 7]), array([8, 9, 10, 11])], engine=eng, index=index)
assert data.select('label1').shape == (2, 1)
assert allclose(data.select('label1').toarray(), [4, 8])
assert allclose(data.select(['label1']).toarray(), [4, 8])
assert allclose(data.select(['label1', 'label2']).toarray(), array([[4, 5], [8, 9]]))
assert data.select('label1').index == ['label1']
assert data.select(['label1']).index == ['label1']
def test_standardize_axis1(eng):
data = fromlist([array([1, 2, 3, 4, 5])], engine=eng)
centered = data.center(1)
standardized = data.standardize(1)
zscored = data.zscore(1)
assert allclose(centered.toarray(), array([-2, -1, 0, 1, 2]), atol=1e-3)
assert allclose(standardized.toarray(),
array([0.70710, 1.41421, 2.12132, 2.82842, 3.53553]), atol=1e-3)
assert allclose(zscored.toarray(),
array([-1.41421, -0.70710, 0, 0.70710, 1.41421]), atol=1e-3)
def test_standardize_axis0(eng):
data = fromlist([array([1, 2]), array([3, 4])], engine=eng)
centered = data.center(0)
standardized = data.standardize(0)
zscored = data.zscore(0)
assert allclose(centered.toarray(), array([[-1, -1], [1, 1]]), atol=1e-3)
assert allclose(standardized.toarray(), array([[1, 2], [3, 4]]), atol=1e-3)
assert allclose(zscored.toarray(), array([[-1, -1], [1, 1]]), atol=1e-3)
def test_squelch(eng):
data = fromlist([array([1, 2]), array([3, 4])], engine=eng)
squelched = data.squelch(5)
assert allclose(squelched.toarray(), [[0, 0], [0, 0]])
squelched = data.squelch(3)
assert allclose(squelched.toarray(), [[0, 0], [3, 4]])
squelched = data.squelch(1)
assert allclose(squelched.toarray(), [[1, 2], [3, 4]])
def test_correlate(eng):
data = fromlist([array([1, 2, 3, 4, 5])], engine=eng)
sig = [4, 5, 6, 7, 8]
corr = data.correlate(sig).toarray()
assert allclose(corr, 1)
sigs = [[4, 5, 6, 7, 8], [8, 7, 6, 5, 4]]
corrs = data.correlate(sigs).toarray()
assert allclose(corrs, [1, -1])
def test_correlate_multiindex(eng):
index = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
data = fromlist([array([1, 2, 3, 4, 5])], index=asarray(index).T, engine=eng)
sig = [4, 5, 6, 7, 8]
corr = data.correlate(sig).toarray()
assert allclose(corr, 1)
sigs = [[4, 5, 6, 7, 8], [8, 7, 6, 5, 4]]
corrs = data.correlate(sigs).toarray()
assert allclose(corrs, [1, -1])
def test_clip(eng):
data = fromlist([array([1, 2, 3, 4, 5])], engine=eng)
assert allclose(data.clip(2).toarray(), [2, 2, 3, 4, 5])
assert allclose(data.clip(2, 3).toarray(), [2, 2, 3, 3, 3])
def test_mean(eng):
data = fromlist([arange(8), arange(8)], engine=eng)
val = data.mean().toarray()
expected = data.toarray().mean(axis=0)
assert allclose(val, expected)
assert str(val.dtype) == 'float64'
def test_sum(eng):
data = fromlist([arange(8), arange(8)], engine=eng)
val = data.sum().toarray()
expected = data.toarray().sum(axis=0)
assert allclose(val, expected)
assert str(val.dtype) == 'int64'
def test_var(eng):
data = fromlist([arange(8), arange(8)], engine=eng)
val = data.var().toarray()
expected = data.toarray().var(axis=0)
assert allclose(val, expected)
assert str(val.dtype) == 'float64'
def test_std(eng):
data = fromlist([arange(8), arange(8)], engine=eng)
val = data.std().toarray()
expected = data.toarray().std(axis=0)
assert allclose(val, expected)
assert str(val.dtype) == 'float64'
def test_max(eng):
data = fromlist([arange(8), arange(8)], engine=eng)
val = data.max().toarray()
expected = data.toarray().max(axis=0)
assert allclose(val, expected)
def test_min(eng):
data = fromlist([arange(8), arange(8)], engine=eng)
val = data.min().toarray()
expected = data.toarray().min(axis=0)
assert allclose(val, expected)
def test_labels(eng):
x = [array([0, 1]), array([2, 3]), array([4, 5]), array([6, 7])]
data = fromlist(x, labels=[0, 1, 2, 3], engine=eng)
assert allclose(data.filter(lambda x: x[0]>2).labels, array([2, 3]))
assert allclose(data[2:].labels, array([2, 3]))
assert allclose(data[1].labels, array([1]))
assert allclose(data[1, :].labels, array([1]))
assert allclose(data[[0, 2]].labels, array([0, 2]))
assert allclose(data.flatten().labels, array([0, 1, 2, 3]))
x = [array([[0, 1],[2, 3]]), array([[4, 5], [6, 7]])]
data = img_fromlist(x, engine=eng).toseries()
data.labels = [[0, 1], [2, 3]]
    assert allclose(data.filter(lambda x: x[0]>1).labels, array([2, 3]))
# pylint: disable=C0114,C0115,C0116
import unittest
import numpy as np
from scipy import constants as const
from nonrad.scaling import (charged_supercell_scaling,
charged_supercell_scaling_VASP, distance_PBC,
find_charge_center, radial_distribution,
sommerfeld_parameter, thermal_velocity)
from nonrad.tests import TEST_FILES, FakeFig
class SommerfeldTest(unittest.TestCase):
def setUp(self):
self.args = {
'T': 300,
'Z': 0,
'm_eff': 1.,
'eps0': 1.,
'method': 'Integrate'
}
def test_neutral(self):
self.assertAlmostEqual(sommerfeld_parameter(**self.args), 1.)
self.args['method'] = 'Analytic'
self.assertAlmostEqual(sommerfeld_parameter(**self.args), 1.)
def test_attractive(self):
self.args['Z'] = -1
self.assertGreater(sommerfeld_parameter(**self.args), 1.)
self.args['method'] = 'Analytic'
self.assertGreater(sommerfeld_parameter(**self.args), 1.)
def test_repulsive(self):
self.args['Z'] = 1
self.assertLess(sommerfeld_parameter(**self.args), 1.)
self.args['method'] = 'Analytic'
self.assertLess(sommerfeld_parameter(**self.args), 1.)
def test_list(self):
self.args['T'] = np.linspace(0.1, 1000, 100)
self.assertEqual(sommerfeld_parameter(**self.args), 1.)
self.args['Z'] = -1
self.assertTrue(np.all(sommerfeld_parameter(**self.args) > 1.))
self.args['Z'] = 1
self.assertTrue(np.all(sommerfeld_parameter(**self.args) < 1.))
self.args['Z'] = 0
self.args['method'] = 'Analytic'
self.assertEqual(sommerfeld_parameter(**self.args), 1.)
self.args['Z'] = -1
self.assertTrue(np.all(sommerfeld_parameter(**self.args) > 1.))
self.args['Z'] = 1
self.assertTrue(np.all(sommerfeld_parameter(**self.args) < 1.))
def test_compare_methods(self):
self.args = {
'T': 150,
'Z': -1,
'm_eff': 0.2,
'eps0': 8.9,
'method': 'Integrate'
}
f0 = sommerfeld_parameter(**self.args)
self.args['method'] = 'Analytic'
f1 = sommerfeld_parameter(**self.args)
self.assertAlmostEqual(f0, f1, places=2)
self.args['Z'] = 1
self.args['T'] = 900
f0 = sommerfeld_parameter(**self.args)
self.args['method'] = 'Integrate'
f1 = sommerfeld_parameter(**self.args)
self.assertGreater(np.abs(f0-f1)/f1, 0.1)
class ChargedSupercellScalingTest(unittest.TestCase):
def test_find_charge_center(self):
lattice = np.eye(3)
density = np.ones((50, 50, 50))
self.assertTrue(
np.allclose(find_charge_center(density, lattice), [0.49]*3)
)
density = np.zeros((50, 50, 50))
density[0, 0, 0] = 1.
self.assertTrue(
np.allclose(find_charge_center(density, lattice), [0.]*3)
)
def test_distance_PBC(self):
a = np.array([0.25]*3)
b = np.array([0.5]*3)
lattice = np.eye(3)
self.assertEqual(distance_PBC(a, b, lattice), np.sqrt(3)*0.25)
b = np.array([0.9]*3)
self.assertEqual(distance_PBC(a, b, lattice), np.sqrt(3)*0.35)
def test_radial_distribution(self):
lattice = np.eye(3)
density = np.zeros((50, 50, 50))
density[0, 0, 0] = 1.
point = np.array([0.]*3)
dist = distance_PBC(np.zeros(3), point, lattice)
r, n = radial_distribution(density, point, lattice)
self.assertAlmostEqual(r[np.where(n == 1.)[0][0]], dist)
point = np.array([0.25]*3)
dist = distance_PBC(np.zeros(3), point, lattice)
r, n = radial_distribution(density, point, lattice)
self.assertAlmostEqual(r[np.where(n == 1.)[0][0]], dist)
point = np.array([0.29, 0.73, 0.44])
dist = distance_PBC(np.zeros(3), point, lattice)
r, n = radial_distribution(density, point, lattice)
self.assertAlmostEqual(r[np.where(n == 1.)[0][0]], dist)
@unittest.skip('WAVECARs too large to share')
def test_charged_supercell_scaling_VASP(self):
f = charged_supercell_scaling_VASP(
str(TEST_FILES / 'WAVECAR.C-'),
189,
def_index=192
)
self.assertAlmostEqual(f, 1.08)
def test_charged_supercell_scaling(self):
# test that numbers work out for homogeneous case
wf = np.ones((20, 20, 20))
f = charged_supercell_scaling(wf, 10*np.eye(3), np.array([0.]*3))
self.assertAlmostEqual(f, 1.00)
# test the plotting stuff
        wf = np.ones((1, 1, 1))
import numpy as np
from matplotlib import patches
import matplotlib.pyplot as plt
# Use xkcd-style figures.
plt.xkcd()
# Some settings
fs = 14
# # # (A) Figure with survey and computational domains, buffer. # # #
fig, ax = plt.subplots(1, 1, figsize=(11, 7))
# Plot domains.
dinp1 = {'fc': 'none', 'zorder': 2}
dc = patches.Rectangle((0, 0), 100, 60, color='.9')
dcf = patches.Rectangle((0, 0), 100, 60, ec='C0', **dinp1)
ds = patches.Rectangle((15, 10), 70, 40, fc='w')
dsf = patches.Rectangle((15, 10), 70, 40, ec='C1', **dinp1)
for d in [dc, dcf, ds, dsf]:
ax.add_patch(d)
dinp2 = {'verticalalignment': 'center', 'zorder': 2}
ax.text(60, 60, r'Computational domain $D_c$', c='C0', **dinp2)
ax.text(60, 50, r'Survey domain $D_s$', c='C1', **dinp2)
# plot sea surface, seafloor, receivers, source.
x = np.arange(101)
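# The script is cut off here; an illustrative continuation (assumed, not from
# the original) could sketch the sea surface, seafloor, receivers, and source
# inside the survey domain before hiding the axes.
ax.plot(x, 55 + 0.5*np.sin(x), c='C9')              # sea surface
ax.plot(x, 20 + 2*np.sin(x/7), c='k')               # seafloor
ax.plot(np.arange(25, 80, 5), [54]*11, 'kv', ms=8)  # receivers
ax.plot(20, 54, 'C3*', ms=15)                       # source
ax.set_xlim([-2, 102])
ax.set_ylim([-2, 62])
ax.set_axis_off()
plt.show()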
import pytest
import numpy as np
import matplotlib.pyplot as plt
from fffit.pareto import (
is_pareto_efficient_simple,
is_pareto_efficient,
find_pareto_set,
plt_pareto_2D
)
from fffit.tests.base_test import BaseTest
class TestPareto(BaseTest):
def test_pareto_simple_known(self):
costs = np.asarray([[0.2, 0.2], [0.1, 0.1],])
result, pareto_points, dominated_points = find_pareto_set(
costs, is_pareto_efficient_simple
)
assert np.allclose(result, [False, True])
assert np.allclose(pareto_points, [[0.1, 0.1]])
assert np.allclose(dominated_points, [[0.2, 0.2]])
    def test_pareto_simple_max(self):
costs = np.asarray([[0.2, 0.2], [0.1, 0.1],])
result, pareto_points, dominated_points = find_pareto_set(
costs, is_pareto_efficient_simple, max_front=True
)
assert np.allclose(result, [True, False])
assert np.allclose(pareto_points, [[0.2, 0.2]])
assert np.allclose(dominated_points, [[0.1, 0.1]])
def test_pareto_efficient_known(self):
costs = np.asarray([[0.2, 0.2], [0.1, 0.1],])
result, pareto_points, dominated_points = find_pareto_set(
costs, is_pareto_efficient
)
assert np.allclose(result, [False, True])
assert np.allclose(pareto_points, [[0.1, 0.1]])
assert np.allclose(dominated_points, [[0.2, 0.2]])
    def test_pareto_efficient_max(self):
costs = np.asarray([[0.2, 0.2], [0.1, 0.1],])
result, pareto_points, dominated_points = find_pareto_set(
costs, is_pareto_efficient, max_front=True
)
        assert np.allclose(result, [True, False])
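        # remaining checks mirror test_pareto_simple_max above
        assert np.allclose(pareto_points, [[0.2, 0.2]])
        assert np.allclose(dominated_points, [[0.1, 0.1]])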
from __future__ import print_function, division, absolute_import
import functools
import sys
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import PIL.Image
import PIL.ImageOps
import PIL.ImageEnhance
import PIL.ImageFilter
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import random as iarandom
from imgaug.testutils import reseed
def _test_shape_hw(func):
img = np.arange(20*10).reshape((20, 10)).astype(np.uint8)
observed = func(np.copy(img))
expected = func(
np.tile(np.copy(img)[:, :, np.newaxis], (1, 1, 3)),
)[:, :, 0]
assert observed.dtype.name == "uint8"
assert observed.shape == (20, 10)
assert np.array_equal(observed, expected)
def _test_shape_hw1(func):
img = np.arange(20*10*1).reshape((20, 10, 1)).astype(np.uint8)
observed = func(np.copy(img))
expected = func(
np.tile(np.copy(img), (1, 1, 3)),
)[:, :, 0:1]
assert observed.dtype.name == "uint8"
assert observed.shape == (20, 10, 1)
assert np.array_equal(observed, expected)
class Test_solarize_(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_sol):
        arr = np.zeros((1,), dtype=np.uint8)
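        # The test body is truncated in the original; the sketch below assumes
        # it verifies that solarize_ delegates to the mocked invert_ and
        # returns its result.
        observed = iaa.solarize_(arr)
        assert mock_sol.call_count == 1
        assert observed is mock_sol.return_value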
import numpy
import operator
from math import sqrt
from amuse.support import exceptions
from amuse.support import console
from amuse.support.core import late
from amuse.support.core import compare_version_strings
from amuse.units import core
from amuse.units.si import none
from amuse.units.core import zero_unit
try:
import astropy.units
import amuse.units.si
HAS_ASTROPY = True
except ImportError:
HAS_ASTROPY = False
"""
"""
class Quantity(object):
"""
A Quantity objects represents a scalar or vector with a
specific unit. Quantity is an abstract base class
for VectorQuantity and ScalarQuantity.
Quantities should be constructed using *or-operator* ("|"),
*new_quantity* or *unit.new_quantity*.
Quantities emulate numeric types.
Examples
>>> from amuse.units import units
>>> 100 | units.m
quantity<100 m>
>>> (100 | units.m) + (1 | units.km)
quantity<1100.0 m>
Quantities can be tested
>>> from amuse.units import units
>>> x = 100 | units.m
>>> x.is_quantity()
True
>>> x.is_scalar()
True
>>> x.is_vector()
False
>>> v = [100, 200, 300] | units.g
>>> v.is_quantity()
True
>>> v.is_scalar()
False
>>> v.is_vector()
True
Quantities can be converted to numbers
>>> from amuse.units import units
>>> x = 1000.0 | units.m
>>> x.value_in(units.m)
1000.0
>>> x.value_in(units.km)
1.0
>>> x.value_in(units.g) # but only if the units are compatible!
Traceback (most recent call last):
File "<stdin>", line 1, in ?
IncompatibleUnitsException: Cannot express m in g, the units do not have the same bases
"""
__slots__ = ['unit']
__array_priority__ = 101
def __init__(self, unit):
self.unit = unit
def __str__(self):
return console.current_printing_strategy.quantity_to_string(self)
def is_quantity(self):
"""
True for all quantities.
"""
return True
def is_scalar(self):
"""
True for scalar quantities.
"""
return False
def is_vector(self):
"""
True for vector quantities.
"""
return False
def __repr__(self):
return 'quantity<'+str(self)+'>'
def __add__(self, other):
if self.unit.is_zero():
other=to_quantity(other)
return new_quantity(other.number, other.unit)
else:
other = to_quantity(other)
factor = other.unit.conversion_factor_from(self.unit)
return new_quantity(self.number + factor*other.number, self.unit)
__radd__ = __add__
def __sub__(self, other):
if self.unit.is_zero():
return -other
else:
other_in_my_units = to_quantity(other).as_quantity_in(self.unit)
return new_quantity(self.number - other_in_my_units.number, self.unit)
def __rsub__(self, other):
if self.unit.is_zero():
return new_quantity(other.number, other.unit)
other_in_my_units = to_quantity(other).as_quantity_in(self.unit)
return new_quantity(other_in_my_units.number - self.number, self.unit)
def __mul__(self, other):
other = to_quantity(other)
return new_quantity_nonone(self.number * other.number, (self.unit * other.unit).to_simple_form())
__rmul__ = __mul__
def __pow__(self, other):
return new_quantity(self.number ** other, self.unit ** other)
def __truediv__(self, other):
other = to_quantity(other)
return new_quantity_nonone(operator.__truediv__(self.number,other.number), (self.unit / other.unit).to_simple_form())
def __rtruediv__(self, other):
return new_quantity_nonone(operator.__truediv__(other,self.number), (1.0 / self.unit).to_simple_form())
def __floordiv__(self, other):
other = to_quantity(other)
return new_quantity_nonone(operator.__floordiv__(self.number,other.number), (self.unit / other.unit).to_simple_form())
def __rfloordiv__(self, other):
return new_quantity_nonone(operator.__floordiv__(other,self.number), (1.0 / self.unit).to_simple_form())
def __div__(self, other):
other = to_quantity(other)
return new_quantity_nonone(self.number/other.number, (self.unit / other.unit).to_simple_form())
def __rdiv__(self, other):
return new_quantity_nonone(other/self.number, (1.0 / self.unit).to_simple_form())
def __mod__(self, other):
other_in_my_units = to_quantity(other).as_quantity_in(self.unit)
return new_quantity_nonone(numpy.mod(self.number , other_in_my_units.number), self.unit)
def __rmod__(self, other):
other_in_my_units = to_quantity(other).as_quantity_in(self.unit)
return new_quantity_nonone(numpy.mod(other_in_my_units.number , self.number), self.unit)
def in_base(self):
unit=self.unit.base_unit()
return self.as_quantity_in(unit)
def sqrt(self):
"""Calculate the square root of each component
>>> from amuse.units import units
>>> s1 = 144.0 | units.m**2
>>> s1.sqrt()
quantity<12.0 m>
>>> v1 = [16.0, 25.0, 36.0] | units.kg
>>> v1.sqrt()
quantity<[4.0, 5.0, 6.0] kg**0.5>
"""
return new_quantity(numpy.sqrt(self.number), (self.unit ** 0.5).to_simple_form())
def as_quantity_in(self, another_unit):
"""
Reproduce quantity in another unit.
The new unit must have the same basic si quantities.
:argument another_unit: unit to convert quantity to
:returns: quantity converted to new unit
"""
if isinstance(another_unit, Quantity):
raise exceptions.AmuseException("Cannot express a unit in a quantity")
factor = self.unit.conversion_factor_from(another_unit)
return new_quantity(self.number * factor, another_unit)
in_=as_quantity_in
def as_string_in(self, another_unit):
"""
Create a string representing the quantity in another unit.
The new unit must have the same basic si quantities.
:argument another_unit: unit to convert quantity to
:returns: string representing quantity converted to new unit
"""
return console.DefaultPrintingStrategy().quantity_to_string(self.as_quantity_in(another_unit))
def value_in(self, unit):
"""
Return a numeric value (for scalars) or array (for vectors)
in the given unit.
A number is returned without any unit information. Use this
function only to transfer values to other libraries that have
no support for quantities (for example plotting).
:argument unit: wanted unit of the value
:returns: number in the given unit
>>> from amuse.units import units
>>> x = 10 | units.km
>>> x.value_in(units.m)
10000.0
"""
value_of_unit_in_another_unit = self.unit.value_in(unit)
return self.number * value_of_unit_in_another_unit
def __abs__(self):
"""
Return the absolute value of this quantity
>>> from amuse.units import units
>>> x = -10 | units.km
>>> print(abs(x))
10 km
"""
return new_quantity(abs(self.number), self.unit)
def __neg__(self):
"""
Unary minus.
>>> from amuse.units import units
>>> x = -10 | units.km
>>> print(-x)
10 km
"""
return new_quantity(-self.number, self.unit)
def __lt__(self, other):
return self.value_in(self.unit) < to_quantity(other).value_in(self.unit)
def __gt__(self, other):
return self.value_in(self.unit) > to_quantity(other).value_in(self.unit)
def __eq__(self, other):
return self.value_in(self.unit) == to_quantity(other).value_in(self.unit)
def __ne__(self, other):
return self.value_in(self.unit) != to_quantity(other).value_in(self.unit)
def __le__(self, other):
return self.value_in(self.unit) <= to_quantity(other).value_in(self.unit)
def __ge__(self, other):
return self.value_in(self.unit) >= to_quantity(other).value_in(self.unit)
if HAS_ASTROPY:
def as_astropy_quantity(self):
return to_astropy(self)
class ScalarQuantity(Quantity):
"""
A ScalarQuantity object represents a physical scalar
quantity.
"""
__slots__ = ['number']
def __init__(self, number, unit):
# Quantity.__init__(self, unit)
# commented out super call, this speeds thing up
self.unit = unit
if unit.dtype is None:
self.number = number
else:
if isinstance(unit.dtype, numpy.dtype):
self.number = unit.dtype.type(number)
else:
self.number = unit.dtype(number)
def is_scalar(self):
return True
def as_vector_with_length(self, length):
return VectorQuantity(numpy.ones(length, dtype=self.unit.dtype) * self.number, self.unit)
def reshape(self, shape):
if shape == -1 or (len(shape) == 1 and shape[0] == 1):
return VectorQuantity([self.number], self.unit)
else:
raise exceptions.AmuseException("Cannot reshape a scalar to vector of shape '{0}'".format(shape))
def __getitem__(self, index):
if index == 0:
return self
else:
raise exceptions.AmuseException("ScalarQuantity does not support indexing")
def copy(self):
return new_quantity(self.number, self.unit)
def to_unit(self):
in_base=self.in_base()
return in_base.number * in_base.unit
def __getstate__(self):
return (self.unit, self.number)
def round(self, decimals = 0):
return new_quantity(numpy.round(self.number, decimals), self.unit)
def new_zeros_array(self, length):
array = numpy.zeros(length, dtype=self.unit.dtype)
return new_quantity(array, self.unit)
def __setstate__(self, x):
self.unit = x[0]
self.number = x[1]
def sum(self, axis=None, dtype=None, out=None):
return self
def cumsum(self, axis=None, dtype=None, out=None):
return self
def prod(self, axis=None, dtype=None):
return self
def min(self, axis = None):
return self
def max(self, axis = None):
return self
amin=min
amax=max
def sorted(self):
return self
def as_unit(self):
return self.number * self.unit
class _flatiter_wrapper(object):
def __init__(self, quantity):
self.flat=quantity.number.flat
self.quantity=quantity
def __iter__(self):
return self
def __next__(self):
return new_quantity(next(self.flat),self.quantity.unit)
def __getitem__(self,x):
return new_quantity(self.flat[x], self.quantity.unit)
def __setitem__(self,index,x):
return self.flat.__setitem__(index,x.value_in(self.quantity.unit))
@property
def base(self):
return self.quantity
@property
def index(self):
return self.flat.index
@property
def coords(self):
return self.flat.coords
@property
def unit(self):
return self.quantity.unit
@property
def number(self):
return self.flat
def copy(self):
return new_quantity(self.flat.copy(), self.quantity.unit)
def is_quantity(self):
return True
def value_in(self, unit):
return self.copy().value_in(unit)
def as_quantity_in(self, unit):
return self.copy().as_quantity_in(unit)
# todo: add as required
class VectorQuantity(Quantity):
"""
A VectorQuantity object represents a physical vector
quantity.
>>> from amuse.units import units
>>> v1 = [0.0, 1.0, 2.0] | units.kg
>>> v2 = [2.0, 4.0, 6.0] | units.kg
>>> v1 + v2
quantity<[2.0, 5.0, 8.0] kg>
>>> len(v1)
3
"""
__slots__ = ['_number']
def __init__(self, array, unit):
Quantity.__init__(self, unit)
if unit is None:
self._number = numpy.array((), dtype='float64')
else:
self._number = numpy.asarray(array, dtype=unit.dtype)
@classmethod
def new_from_scalar_quantities(cls, *values):
unit=to_quantity(values[0]).unit
try:
array=[value_in(x,unit) for x in values]
except core.IncompatibleUnitsException:
raise exceptions.AmuseException("not all values have conforming units")
return cls(array, unit)
@classmethod
def new_from_array(cls, array):
shape=array.shape
vector=cls.new_from_scalar_quantities(*array.flat)
return vector.reshape(shape)
def aszeros(self):
return new_quantity(numpy.zeros(self.shape, dtype=self.number.dtype), self.unit)
def new_zeros_array(self, length):
array = numpy.zeros(length, dtype=self.unit.dtype)
return type(self)(array, self.unit)
@classmethod
def zeros(cls, length, unit):
array = numpy.zeros(length, dtype=unit.dtype)
return cls(array, unit)
@classmethod
def arange(cls, begin, end, step):
return arange(begin, end, step)
@property
def shape(self):
return self.number.shape
@property
def dtype(self):
return self.number.dtype
def flatten(self):
return new_quantity(self.number.flatten(), self.unit)
@property
def flat(self):
return _flatiter_wrapper(self)
def is_vector(self):
return True
def as_vector_with_length(self, length):
if len(self)==length:
return self.copy()
if len(self)==1:
return self.new_from_scalar_quantities(*[self[0]]*length)
raise exceptions.AmuseException("as_vector_with_length only valid for same length or 1")
def as_vector_quantity(self):
return self
def __len__(self):
return len(self._number)
def split(self, indices_or_sections, axis = 0):
parts = numpy.split(self.number, indices_or_sections, axis)
return [VectorQuantity(x, self.unit) for x in parts]
def array_split(self, indices_or_sections, axis = 0):
parts = numpy.array_split(self.number, indices_or_sections, axis)
return [VectorQuantity(x, self.unit) for x in parts]
def sum(self, axis=None, dtype=None, out=None):
"""Calculate the sum of the vector components
>>> from amuse.units import units
>>> v1 = [0.0, 1.0, 2.0] | units.kg
>>> v1.sum()
quantity<3.0 kg>
"""
return new_quantity(self.number.sum(axis, dtype, out), self.unit)
def cumsum(self, axis=None, dtype=None, out=None):
""" Calculate the cumulative sum of the elements along a given axis. """
return new_quantity(numpy.cumsum(self.number, axis, dtype, out), self.unit)
def prod(self, axis=None, dtype=None):
"""Calculate the product of the vector components
>>> from amuse.units import units
>>> v1 = [1.0, 2.0, 3.0] | units.m
>>> v1.prod()
quantity<6.0 m**3>
>>> v1 = [[2.0, 3.0], [2.0, 4.0], [5.0,3.0] ] | units.m
>>> v1.prod()
quantity<720.0 m**6>
>>> v1.prod(0)
quantity<[20.0, 36.0] m**3>
>>> v1.prod(1)
quantity<[6.0, 8.0, 15.0] m**2>
>>> v1 = [[[2.0, 3.0], [2.0, 4.0]],[[5.0, 2.0], [3.0, 4.0]]] | units.m
>>> v1.prod() # doctest:+ELLIPSIS
quantity<5760.0 m**8...>
>>> v1.prod(0)
quantity<[[10.0, 6.0], [6.0, 16.0]] m**2>
>>> v1.prod(1)
quantity<[[4.0, 12.0], [15.0, 8.0]] m**2>
>>> v1.prod(2)
quantity<[[6.0, 8.0], [10.0, 12.0]] m**2>
"""
if axis is None:
return new_quantity_nonone(self.number.prod(axis, dtype), self.unit ** numpy.prod(self.number.shape))
else:
return new_quantity_nonone(self.number.prod(axis, dtype), self.unit ** self.number.shape[axis])
def inner(self, other):
"""Calculate the inner product of self with other.
>>> from amuse.units import units
>>> v1 = [1.0, 2.0, 3.0] | units.m
>>> v1.inner(v1)
quantity<14.0 m**2>
"""
other = to_quantity(other)
return new_quantity_nonone(numpy.inner(self._number, other._number), (self.unit * other.unit).to_simple_form())
def length_squared(self):
"""Calculate the squared length of the vector.
>>> from amuse.units import units
>>> v1 = [2.0, 3.0, 4.0] | units.m
>>> v1.length_squared()
quantity<29.0 m**2>
"""
return (self * self).sum()
def length(self):
"""Calculate the length of the vector.
>>> from amuse.units import units
>>> v1 = [0.0, 3.0, 4.0] | units.m
>>> v1.length()
quantity<5.0 m>
"""
return self.length_squared().sqrt()
def lengths(self):
"""Calculate the length of the vectors in this vector.
>>> from amuse.units import units
>>> v1 = [[0.0, 3.0, 4.0],[2.0 , 2.0 , 1.0]] | units.m
>>> v1.lengths()
quantity<[5.0, 3.0] m>
"""
return self.lengths_squared().sqrt()
def lengths_squared(self):
"""Calculate the length of the vectors in this vector
>>> from amuse.units import units
>>> v1 = [[0.0, 3.0, 4.0],[4.0, 2.0, 1.0]] | units.m
>>> v1.lengths_squared()
quantity<[25.0, 21.0] m**2>
"""
return (self.unit**2).new_quantity((self.number * self.number).sum(self.number.ndim - 1))
def __getitem__(self, index):
"""Return the "index" component as a quantity.
:argument index: index of the component, valid values
for 3 dimensional vectors are: ``[0,1,2]``
:returns: quantity with the same units
>>> from amuse.units import si
>>> vector = [0.0, 1.0, 2.0] | si.kg
>>> print(vector[1])
1.0 kg
>>> print(vector[0:2])
[0.0, 1.0] kg
>>> print(vector[[0,2,]])
[0.0, 2.0] kg
"""
return new_quantity(self._number[index], self.unit)
def take(self, indices):
return VectorQuantity(self._number.take(indices), self.unit)
def put(self, indices, vector):
try:
if self.unit.is_zero():
self.unit = vector.unit
self._number.put(indices, vector.value_in(self.unit))
except AttributeError:
if not is_quantity(vector):
raise ValueError("Tried to put a non quantity value in a quantity")
raise
def __setitem__(self, index, quantity):
"""Update the "index" component to the specified quantity.
:argument index: index of the component, valid values
for 3 dimensional vectors are: ``[0,1,2]``
:quantity: quantity to set, will be converted to
the unit of this vector
>>> from amuse.units import si
>>> vector = [0.0, 1.0, 2.0] | si.kg
>>> g = si.kg / 1000
>>> vector[1] = 3500 | g
>>> print(vector)
[0.0, 3.5, 2.0] kg
"""
quantity = as_vector_quantity(quantity)
if self.unit.is_zero():
self.unit = quantity.unit
self._number[index] = quantity.value_in(self.unit)
@property
def number(self):
return self._number
@property
def x(self):
"""The x axis component of a 3 dimensional vector.
This is equivalent to the first component of vector.
:returns: x axis component as a quantity
>>> from amuse.units import si
>>> vector = [1.0, 2.0, 3.0] | si.kg
>>> print(vector.x)
1.0 kg
"""
return new_quantity(self.number[numpy.newaxis, ..., 0][0], self.unit)
@property
def y(self):
"""The y axis component of a 3 dimensional vector.
This is equivalent to the second component of vector.
:returns: y axis component as a quantity
>>> from amuse.units import si
>>> vector = [1.0, 2.0, 3.0] | si.kg
>>> print(vector.y)
2.0 kg
"""
return new_quantity(self.number[numpy.newaxis, ..., 1][0], self.unit)
@property
def z(self):
"""The z axis component of a 3 dimensional vector.
This is equivalent to the third component of vector.
:returns: z axis component as a quantity
>>> from amuse.units import si
>>> vector = [1.0, 2.0, 3.0] | si.kg
>>> print(vector.z)
3.0 kg
"""
return new_quantity(self.number[numpy.newaxis, ..., 2][0], self.unit)
def indices(self):
for x in range(len(self._number)):
yield x
def copy(self):
return new_quantity(self.number.copy(), self.unit)
def norm_squared(self):
return self.length_squared()
def norm(self):
return self.length()
def append(self, scalar_quantity):
"""
Append a scalar quantity to this vector.
>>> from amuse.units import si
>>> vector = [1.0, 2.0, 3.0] | si.kg
>>> vector.append(4.0 | si.kg)
>>> print(vector)
[1.0, 2.0, 3.0, 4.0] kg
"""
append_number = numpy.array(scalar_quantity.value_in(self.unit)) # fix for deg, unitless
# The following lines make sure that appending vectors works as expected,
# e.g. ([]|units.m).append([1,2,3]|units.m) -> [[1,2,3]] | units.m
# e.g. ([[1,2,3]]|units.m).append([4,5,6]|units.m) -> [[1,2,3],[4,5,6]] | units.m
if (append_number.shape and (len(self._number) == 0 or self._number.shape[1:] == append_number.shape)):
new_shape = [1 + self._number.shape[0]] + list(append_number.shape)
else:
new_shape = -1
self._number = numpy.append(self._number, append_number).reshape(new_shape)
def extend(self, vector_quantity):
"""
Concatenate the vector quantity to this vector.
If the units differ, the vector_quantity argument
is converted to the units of this vector.
>>> from amuse.units import units
>>> vector1 = [1.0, 2.0, 3.0] | units.kg
>>> vector2 = [1500, 2500, 6000] | units.g
>>> vector1.extend(vector2)
>>> print(vector1)
[1.0, 2.0, 3.0, 1.5, 2.5, 6.0] kg
"""
self._number = numpy.concatenate((self._number, vector_quantity.value_in(self.unit)))
def prepend(self, scalar_quantity):
"""
Prepend the scalar quantity before this vector.
If the units differ, the scalar_quantity argument
is converted to the units of this vector.
>>> from amuse.units import units
>>> vector1 = [1.0, 2.0, 3.0] | units.kg
>>> vector1.prepend(0.0 | units.kg)
>>> print(vector1)
[0.0, 1.0, 2.0, 3.0] kg
"""
self._number = numpy.concatenate(([scalar_quantity.value_in(self.unit)], self._number))
def minimum(self, other):
"""
Return the minimum of self and the argument.
>>> from amuse.units import si
>>> v1 = [1.0, 2.0, 3.0] | si.kg
>>> v2 = [0.0, 3.0, 4.0] | si.kg
>>> v1.minimum(v2)
quantity<[0.0, 2.0, 3.0] kg>
"""
other_in_my_units = other.as_quantity_in(self.unit)
is_smaller_than = self.number < other_in_my_units.number
values = numpy.where(is_smaller_than, self.number, other_in_my_units.number)
return VectorQuantity(values, self.unit)
#!/usr/bin/python
import pandas as pd
import sys, getopt
import matplotlib.pyplot as plt
import numpy as np
import re
import os
import glob
from matplotlib import cm
from scipy.optimize import minimize, brute
from scipy import interpolate, optimize
from mpl_toolkits.mplot3d import Axes3D, art3d
from matplotlib.patches import Circle, Ellipse
import pickle
import plotly.io as pio
import plotly.graph_objects as go
from scipy.interpolate import griddata
def add_point(ax, x, y, z, fc = None, ec = None, radius = 0.005, labelArg = None):
xy_len, z_len = ax.get_figure().get_size_inches()
axis_length = [x[1] - x[0] for x in [ax.get_xbound(), ax.get_ybound(), ax.get_zbound()]]
axis_rotation = {'z': ((x, y, z), axis_length[1]/axis_length[0]),
'y': ((x, z, y), axis_length[2]/axis_length[0]*xy_len/z_len),
'x': ((y, z, x), axis_length[2]/axis_length[1]*xy_len/z_len)}
i = 0
for a, ((x0, y0, z0), ratio) in axis_rotation.items():
p = Ellipse((x0, y0), width = radius, height = radius*ratio, fc=fc, ec=ec, label = labelArg if i == 0 else "")
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=z0, zdir=a)
i = i + 1
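# Example usage of add_point (illustrative sketch only; the figure, axes and
# coordinates below are assumptions, not part of the original script):
#
#   fig = plt.figure()
#   ax3d = fig.add_subplot(111, projection='3d')
#   ax3d.set_xlim(0, 1); ax3d.set_ylim(0, 1); ax3d.set_zlim(0, 1)
#   add_point(ax3d, 0.5, 0.5, 0.5, fc='red', ec='black', radius=0.02, labelArg='minimum')
#   ax3d.legend()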
def find_minimum(path, model, wolfKind, potential, box, plotSuface=False):
df = pd.read_csv(path,sep='\t',index_col=0)
df = df.iloc[: , :-1]
dfMean = df.mean()
points = dfMean.index.map(lambda x: x.strip('('))
points = points.map(lambda x: x.strip(')'))
pointsSplit = points.str.split(pat=", ", expand=False)
df3 = pd.DataFrame(pointsSplit.tolist(), columns=['rcut','alpha'], dtype=np.float64)
df4 = pd.DataFrame(dfMean.values, columns=['err'], dtype=np.float64)
x = df3.iloc[:,0].to_numpy()
y = df3.iloc[:,1].to_numpy()
z = np.abs(df4.iloc[:,0].to_numpy())
rranges = slice(x.min(), x.max(), (x.max() - x.min())/650), slice(y.min(), y.max(), (y.max() - y.min())/650)
print(rranges)
F2 = interpolate.interp2d(x, y, z, kind='cubic')
xi = np.linspace(x.min(), x.max(), 6500)
yi = np.linspace(y.min(), y.max(), 6500)
X,Y = np.meshgrid(xi,yi)
Z2 = F2(xi, yi)
f = lambda x: np.abs(F2(*x))
bounds = [(x.min(), x.max()),(y.min(), y.max())]
bf = brute(f, rranges, full_output=True, finish=optimize.fmin)
bfXY = np.array(bf[0])
print(bfXY[0])
print(bfXY[1])
x0 = (bfXY[0], bfXY[1])
gd = minimize(f, x0, method='SLSQP', bounds=bounds)
print(gd)
gdXY = np.array(gd.x)
print(gdXY[0])
print(gdXY[1])
gdJacXY = np.array(gd.jac)
print(gdJacXY[0])
print(gdJacXY[1])
ZBF = F2(bfXY[0], bfXY[1])
ZGD = F2(gdXY[0], gdXY[1])
d = {'x': [gdXY[0]], 'y': [gdXY[1]], 'z':[ZGD]}
dfGD = pd.DataFrame(data=d)
print("ZBF : ", ZBF)
print("ZGD : ", ZGD)
if(plotSuface):
title = model+"_"+wolfKind+"_"+potential+"_Box_"+box
xi_forplotting = np.linspace(x.min(), x.max(), 1000)
yi_forplotting = np.linspace(y.min(), y.max(), 1000)
Z2_forplotting = F2(xi_forplotting, yi_forplotting)
prefix = os.path.split(path)
plotPath = os.path.join(prefix[0], title)
#fig.savefig(fname=plotPath+".png")
iteractivefig = go.Figure()
iteractivefig.add_surface(x=xi_forplotting,y=yi_forplotting,z=Z2_forplotting)
layout = go.Layout(title=title,autosize=True,
margin=dict(l=65, r=65, b=65, t=65))
iteractivefig.update_layout(layout)
iteractivefig.update_layout(scene = dict(
xaxis_title='RCut',
yaxis_title='Alpha',
zaxis_title='Relative Error'),
width=700,
margin=dict(r=20, b=10, l=10, t=10))
iteractivefig.update_traces(contours_z=dict(show=True, usecolormap=True,
highlightcolor="limegreen", project_z=True))
pio.write_html(iteractivefig, file=plotPath+".html", auto_open=False)
return (bfXY[0], bfXY[1], ZBF, gdXY[0], gdXY[1], ZGD, gdJacXY[0], gdJacXY[1])
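# Illustrative call of find_minimum (the file name and label values are
# hypothetical; only the parameter names match the signature above):
#
#   result = find_minimum("Wolf_Calibration_DSP_VlugtWIntra_BOX_0_example.dat",
#                         model="SPCE", wolfKind="DSP", potential="VlugtWIntra",
#                         box="0", plotSuface=True)
#   rcut_bf, alpha_bf, err_bf, rcut_gd, alpha_gd, err_gd, jac_r, jac_a = result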
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print('3DSurface.py -i <inputfile.p> -o <outputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('3DSurface.py -i <inputfile.p> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
parsingInputs = False
print('Input file path is', inputfile)
print('Output file is ', outputfile)
p = re.compile(r"Wolf_Calibration_(\w+?)_(\w+?)_BOX_(\d+)_(\w+?)\.dat")
calibrationFiles = sorted(glob.glob(os.path.join(inputfile,'Wolf_Calibration_*.dat')), key=os.path.getmtime)
print(calibrationFiles)
for calFile in calibrationFiles:
justFileName = os.path.basename(calFile)
print(justFileName)
groups = p.search(justFileName)
wolfKind = groups.group(1)
potential = groups.group(2)
box = groups.group(3)
print ("wolf Kind" , wolfKind)
print ("potential Kind" , potential)
print ("box" , box)
df = pd.read_csv(calFile,sep='\t',index_col=0)
df = df.iloc[: , :-1]
dfMean = df.mean()
points = dfMean.index.map(lambda x: x.strip('('))
points = points.map(lambda x: x.strip(')'))
pointsSplit = points.str.split(pat=", ", expand=False)
df3 = pd.DataFrame(pointsSplit.tolist(), columns=['rcut','alpha'], dtype=np.float64)
df4 = pd.DataFrame(dfMean.values, columns=['err'], dtype=np.float64)
print(df3)
print(df4)
minxy = df3.min()
maxxy = df3.max()
x = df3.iloc[:,0].to_numpy()
y = df3.iloc[:,1].to_numpy()
z = np.abs(df4.iloc[:,0].to_numpy())
x2 = np.linspace(minxy[0], maxxy[0], 6500)
y2 = np.linspace(minxy[1], maxxy[1], 6500)
print((maxxy[0] - minxy[0]))
print((maxxy[1] - minxy[1]))
rranges = slice(minxy[0], maxxy[0], (maxxy[0] - minxy[0])/650), slice(minxy[1], maxxy[1], (maxxy[1] - minxy[1])/650)
print(rranges)
X2, Y2 = np.meshgrid(x2, y2)
F2 = interpolate.interp2d(x, y, z, kind='quintic')
Z2 = F2(x2, y2)
f = lambda x: np.abs(F2(*x))
bounds = [(minxy[0], maxxy[0]),(minxy[1], maxxy[1])]
bf = brute(f, rranges, full_output=True, finish=optimize.fmin)
bfXY = np.array(bf[0])
print(bfXY[0])
print(bfXY[1])
x0 = (bfXY[0], bfXY[1])
gd = minimize(f, x0, method='SLSQP', bounds=bounds)
print(gd)
gdXY = np.array(gd.x)
#!/usr/bin/env python
import numpy as np
import itertools as it
from copy import deepcopy
import sys
from animation import *
from mobject import *
from constants import *
from mobject.region import *
import displayer as disp
from scene.scene import Scene, GraphScene
from scene.graphs import *
from .moser_main import EulersFormula
from script_wrapper import command_line_create_scene
MOVIE_PREFIX = "ecf_graph_scenes/"
RANDOLPH_SCALE_FACTOR = 0.3
EDGE_ANNOTATION_SCALE_FACTOR = 0.7
DUAL_CYCLE = [3, 4, 5, 6, 1, 0, 2, 3]
class EulersFormulaWords(Scene):
def construct(self):
self.add(TexMobject("V-E+F=2"))
class TheTheoremWords(Scene):
def construct(self):
self.add(TextMobject("The Theorem:"))
class ProofAtLastWords(Scene):
def construct(self):
self.add(TextMobject("The Proof At Last..."))
class DualSpanningTreeWords(Scene):
def construct(self):
self.add(TextMobject("Spanning trees have duals too!"))
class PreferOtherProofDialogue(Scene):
def construct(self):
teacher = Face("talking").shift(2*LEFT)
student = Face("straight").shift(2*RIGHT)
teacher_bubble = SpeechBubble(LEFT).speak_from(teacher)
student_bubble = SpeechBubble(RIGHT).speak_from(student)
teacher_bubble.write("Look at this \\\\ elegant proof!")
student_bubble.write("I prefer the \\\\ other proof.")
self.add(student, teacher, teacher_bubble, teacher_bubble.text)
self.wait(2)
self.play(Transform(
Dot(student_bubble.tip).set_color("black"),
Mobject(student_bubble, student_bubble.text)
))
self.wait(2)
self.remove(teacher_bubble.text)
teacher_bubble.write("Does that make this \\\\ any less elegant?")
self.add(teacher_bubble.text)
self.wait(2)
class IllustrateDuality(GraphScene):
def construct(self):
GraphScene.construct(self)
self.generate_dual_graph()
self.add(TextMobject("Duality").to_edge(UP))
self.remove(*self.vertices)
def special_alpha(t):
if t > 0.5:
t = 1 - t
if t < 0.25:
return smooth(4*t)
else:
return 1
kwargs = {
"run_time": 5.0,
"rate_func": special_alpha
}
self.play(*[
Transform(*edge_pair, **kwargs)
for edge_pair in zip(self.edges, self.dual_edges)
] + [
Transform(
Mobject(*[
self.vertices[index]
for index in cycle
]),
dv,
**kwargs
)
for cycle, dv in zip(
self.graph.region_cycles,
self.dual_vertices
)
])
self.wait()
class IntroduceGraph(GraphScene):
def construct(self):
GraphScene.construct(self)
tweaked_graph = deepcopy(self.graph)
for index in 2, 4:
tweaked_graph.vertices[index] += 2.8*RIGHT + 1.8*DOWN
tweaked_self = GraphScene(tweaked_graph)
edges_to_remove = [
self.edges[self.graph.edges.index(pair)]
for pair in [(4, 5), (0, 5), (1, 5), (7, 1), (8, 3)]
]
connected, planar, graph = TextMobject([
"Connected ", "Planar ", "Graph"
]).to_edge(UP).split()
not_okay = TextMobject("Not Okay").set_color("red")
planar_explanation = TextMobject("""
(``Planar'' just means we can draw it without
intersecting lines)
""", size="\\small")
planar_explanation.shift(planar.get_center() + 0.5*DOWN)
self.draw_vertices()
self.draw_edges()
self.clear()
self.add(*self.vertices + self.edges)
self.wait()
self.add(graph)
self.wait()
kwargs = {
"rate_func": there_and_back,
"run_time": 5.0
}
self.add(not_okay)
self.play(*[
Transform(*pair, **kwargs)
for pair in zip(
self.edges + self.vertices,
tweaked_self.edges + tweaked_self.vertices,
)
])
self.remove(not_okay)
self.add(planar, planar_explanation)
self.wait(2)
self.remove(planar_explanation)
self.add(not_okay)
self.remove(*edges_to_remove)
self.play(ShowCreation(
Mobject(*edges_to_remove),
rate_func=lambda t: 1 - t,
run_time=1.0
))
self.wait(2)
self.remove(not_okay)
self.add(connected, *edges_to_remove)
self.wait()
class OldIntroduceGraphs(GraphScene):
def construct(self):
GraphScene.construct(self)
self.draw_vertices()
self.draw_edges()
self.wait()
self.clear()
self.add(*self.edges)
self.replace_vertices_with(Face().scale(0.4))
friends = TextMobject("Friends").scale(EDGE_ANNOTATION_SCALE_FACTOR)
self.annotate_edges(friends.shift((0, friends.get_height()/2, 0)))
self.play(*[
CounterclockwiseTransform(vertex, Dot(point))
for vertex, point in zip(self.vertices, self.points)
]+[
Transform(ann, line)
for ann, line in zip(
self.edge_annotations,
self.edges
)
])
self.wait()
class PlanarGraphDefinition(Scene):
def construct(self):
Not, quote, planar, end_quote = TextMobject([
"Not \\\\", "``", "Planar", "''",
# "no matter how \\\\ hard you try"
]).split()
shift_val = Mobject(Not, planar).to_corner().get_center()
Not.set_color("red").shift(shift_val)
graphs = [
Mobject(*GraphScene(g).mobjects)
for g in [
CubeGraph(),
CompleteGraph(5),
OctohedronGraph()
]
]
self.add(quote, planar, end_quote)
self.wait()
self.play(
FadeOut(quote),
FadeOut(end_quote),
ApplyMethod(planar.shift, shift_val),
FadeIn(graphs[0]),
run_time=1.5
)
self.wait()
self.remove(graphs[0])
self.add(graphs[1])
planar.set_color("red")
self.add(Not)
self.wait(2)
planar.set_color("white")
self.remove(Not)
self.remove(graphs[1])
self.add(graphs[2])
self.wait(2)
class TerminologyFromPolyhedra(GraphScene):
args_list = [(CubeGraph(),)]
def construct(self):
GraphScene.construct(self)
rot_kwargs = {
"radians": np.pi / 3,
"run_time": 5.0
}
vertices = [
point / 2 + OUT if abs(point[0]) == 2 else point + IN
for point in self.points
]
cube = Mobject(*[
Line(vertices[edge[0]], vertices[edge[1]])
for edge in self.graph.edges
])
cube.rotate(-np.pi/3, [0, 0, 1])
cube.rotate(-np.pi/3, [0, 1, 0])
dots_to_vertices = TextMobject("Dots $\\to$ Vertices").to_corner()
lines_to_edges = TextMobject("Lines $\\to$ Edges").to_corner()
regions_to_faces = TextMobject("Regions $\\to$ Faces").to_corner()
self.clear()
# self.play(TransformAnimations(
# Rotating(Dodecahedron(), **rot_kwargs),
# Rotating(cube, **rot_kwargs)
# ))
self.play(Rotating(cube, **rot_kwargs))
self.clear()
self.play(*[
Transform(l1, l2)
for l1, l2 in zip(cube.split(), self.edges)
])
self.wait()
self.add(dots_to_vertices)
self.play(*[
ShowCreation(dot, run_time=1.0)
for dot in self.vertices
])
self.wait(2)
self.remove(dots_to_vertices, *self.vertices)
self.add(lines_to_edges)
self.play(ApplyMethod(
Mobject(*self.edges).set_color, "yellow"
))
self.wait(2)
self.clear()
self.add(*self.edges)
self.add(regions_to_faces)
self.generate_regions()
for region in self.regions:
self.set_color_region(region)
self.wait(3.0)
class ThreePiecesOfTerminology(GraphScene):
def construct(self):
GraphScene.construct(self)
terms = cycles, spanning_trees, dual_graphs = [
TextMobject(phrase).shift(y*UP).to_edge()
for phrase, y in [
("Cycles", 3),
("Spanning Trees", 1),
("Dual Graphs", -1),
]
]
self.generate_spanning_tree()
scale_factor = 1.2
def accent(mobject, color="yellow"):
return mobject.scale_in_place(scale_factor).set_color(color)
def tone_down(mobject):
return mobject.scale_in_place(1.0/scale_factor).set_color("white")
self.add(accent(cycles))
self.trace_cycle(run_time=1.0)
self.wait()
tone_down(cycles)
self.remove(self.traced_cycle)
self.add(accent(spanning_trees))
self.play(ShowCreation(self.spanning_tree), run_time=1.0)
self.wait()
tone_down(spanning_trees)
self.remove(self.spanning_tree)
self.add(accent(dual_graphs, "red"))
self.generate_dual_graph()
for mob in self.mobjects:
mob.fade()
self.play(*[
ShowCreation(mob, run_time=1.0)
for mob in self.dual_vertices + self.dual_edges
])
self.wait()
self.clear()
self.play(ApplyMethod(
Mobject(*terms).center
))
self.wait()
class WalkingRandolph(GraphScene):
args_list = [
(SampleGraph(), [0, 1, 7, 8]),
]
@staticmethod
def args_to_string(graph, path):
return str(graph) + "".join(map(str, path))
def __init__(self, graph, path, *args, **kwargs):
self.path = path
GraphScene.__init__(self, graph, *args, **kwargs)
def construct(self):
GraphScene.construct(self)
point_path = [self.points[i] for i in self.path]
randy = Randolph()
randy.scale(RANDOLPH_SCALE_FACTOR)
randy.move_to(point_path[0])
for next, last in zip(point_path[1:], point_path):
self.play(
WalkPiCreature(randy, next),
ShowCreation(Line(last, next).set_color("yellow")),
run_time=2.0
)
self.randy = randy
class PathExamples(GraphScene):
args_list = [(SampleGraph(),)]
def construct(self):
GraphScene.construct(self)
paths = [
(1, 2, 4, 5, 6),
(6, 7, 1, 3),
]
non_paths = [
[(0, 1), (7, 8), (5, 6), ],
[(5, 0), (0, 2), (0, 1)],
]
valid_path = TextMobject("Valid \\\\ Path").set_color("green")
not_a_path = TextMobject("Not a \\\\ Path").set_color("red")
for mob in valid_path, not_a_path:
mob.to_edge(UP)
kwargs = {"run_time": 1.0}
for path, non_path in zip(paths, non_paths):
path_lines = Mobject(*[
Line(
self.points[path[i]],
self.points[path[i+1]]
).set_color("yellow")
for i in range(len(path) - 1)
])
non_path_lines = Mobject(*[
Line(
self.points[pp[0]],
self.points[pp[1]],
).set_color("yellow")
for pp in non_path
])
self.remove(not_a_path)
self.add(valid_path)
self.play(ShowCreation(path_lines, **kwargs))
self.wait(2)
self.remove(path_lines)
self.remove(valid_path)
self.add(not_a_path)
self.play(ShowCreation(non_path_lines, **kwargs))
self.wait(2)
self.remove(non_path_lines)
class IntroduceCycle(WalkingRandolph):
args_list = [
(SampleGraph(), [0, 1, 3, 2, 0])
]
def construct(self):
WalkingRandolph.construct(self)
self.remove(self.randy)
encompassed_cycles = [
c for c in self.graph.region_cycles if set(c).issubset(self.path)]
regions = [
self.region_from_cycle(cycle)
for cycle in encompassed_cycles
]
for region in regions:
self.set_color_region(region)
self.wait()
class IntroduceRandolph(GraphScene):
def construct(self):
GraphScene.construct(self)
randy = Randolph().move_to((-3, 0, 0))
name = TextMobject("Randolph")
self.play(Transform(
randy,
deepcopy(randy).scale(
RANDOLPH_SCALE_FACTOR).move_to(self.points[0]),
))
self.wait()
name.shift((0, 1, 0))
self.add(name)
self.wait()
class DefineSpanningTree(GraphScene):
def construct(self):
GraphScene.construct(self)
randy = Randolph()
randy.scale(RANDOLPH_SCALE_FACTOR).move_to(self.points[0])
dollar_signs = TextMobject("\\$\\$")
dollar_signs.scale(EDGE_ANNOTATION_SCALE_FACTOR)
dollar_signs = Mobject(*[
deepcopy(dollar_signs).shift(edge.get_center())
for edge in self.edges
])
unneeded = TextMobject("unneeded!")
unneeded.scale(EDGE_ANNOTATION_SCALE_FACTOR)
self.generate_spanning_tree()
def green_dot_at_index(index):
return Dot(
self.points[index],
radius=2*Dot.DEFAULT_RADIUS,
color="lightgreen",
)
def out_of_spanning_set(point_pair):
stip = self.spanning_tree_index_pairs
return point_pair not in stip and \
tuple(reversed(point_pair)) not in stip
self.add(randy)
self.accent_vertices(run_time=2.0)
self.add(dollar_signs)
self.wait(2)
self.remove(dollar_signs)
run_time_per_branch = 0.5
self.play(
ShowCreation(green_dot_at_index(0)),
run_time=run_time_per_branch
)
for pair in self.spanning_tree_index_pairs:
self.play(ShowCreation(
Line(
self.points[pair[0]],
self.points[pair[1]]
).set_color("yellow"),
run_time=run_time_per_branch
))
self.play(ShowCreation(
green_dot_at_index(pair[1]),
run_time=run_time_per_branch
))
self.wait(2)
unneeded_edges = list(filter(out_of_spanning_set, self.graph.edges))
for edge, limit in zip(unneeded_edges, list(range(5))):
line = Line(self.points[edge[0]], self.points[edge[1]])
line.set_color("red")
self.play(ShowCreation(line, run_time=1.0))
self.add(unneeded.center().shift(line.get_center() + 0.2*UP))
self.wait()
self.remove(line, unneeded)
class NamingTree(GraphScene):
def construct(self):
GraphScene.construct(self)
self.generate_spanning_tree()
self.generate_treeified_spanning_tree()
branches = self.spanning_tree.split()
branches_copy = deepcopy(branches)
treeified_branches = self.treeified_spanning_tree.split()
tree = TextMobject("``Tree''").to_edge(UP)
spanning_tree = TextMobject("``Spanning Tree''").to_edge(UP)
self.add(*branches)
self.play(
FadeOut(Mobject(*self.edges + self.vertices)),
Animation(Mobject(*branches)),
)
self.clear()
self.add(tree, *branches)
self.wait()
self.play(*[
Transform(b1, b2, run_time=2)
for b1, b2 in zip(branches, treeified_branches)
])
self.wait()
self.play(*[
FadeIn(mob)
for mob in self.edges + self.vertices
] + [
Transform(b1, b2, run_time=2)
for b1, b2 in zip(branches, branches_copy)
])
self.accent_vertices(run_time=2)
self.remove(tree)
self.add(spanning_tree)
self.wait(2)
class DualGraph(GraphScene):
def construct(self):
GraphScene.construct(self)
self.generate_dual_graph()
self.add(TextMobject("Dual Graph").to_edge(UP).shift(2*LEFT))
self.play(*[
ShowCreation(mob)
for mob in self.dual_edges + self.dual_vertices
])
self.wait()
class FacebookLogo(Scene):
def construct(self):
im = ImageMobject("facebook_full_logo", invert=False)
self.add(im.scale(0.7))
class FacebookGraph(GraphScene):
def construct(self):
GraphScene.construct(self)
account = ImageMobject("facebook_silhouette", invert=False)
account.scale(0.05)
logo = ImageMobject("facebook_logo", invert=False)
logo.scale(0.1)
logo.shift(0.2*LEFT + 0.1*UP)
account.add(logo).center()
account.shift(0.2*LEFT + 0.1*UP)
friends = TexMobject(
"\\leftarrow \\text{friends} \\rightarrow"
).scale(0.5*EDGE_ANNOTATION_SCALE_FACTOR)
self.clear()
accounts = [
deepcopy(account).shift(point)
for point in self.points
]
self.add(*accounts)
self.wait()
self.annotate_edges(friends)
self.wait()
self.play(*[
CounterclockwiseTransform(account, vertex)
for account, vertex in zip(accounts, self.vertices)
])
self.wait()
self.play(*[
Transform(ann, edge)
for ann, edge in zip(self.edge_annotations, self.edges)
])
self.wait()
class FacebookGraphAsAbstractSet(Scene):
def construct(self):
names = [
"Louis",
"Randolph",
"Mortimer",
"<NAME>",
"Penelope",
]
friend_pairs = [
(0, 1),
(0, 2),
(1, 2),
(3, 0),
(4, 0),
(1, 3),
(1, 2),
]
names_string = "\\\\".join(names + ["$\\vdots$"])
friends_string = "\\\\".join([
"\\text{%s}&\\leftrightarrow\\text{%s}" % (names[i], names[j])
for i, j in friend_pairs
] + ["\\vdots"])
names_mob = TextMobject(names_string).shift(3*LEFT)
friends_mob = TexMobject(
friends_string, size="\\Large"
).shift(3*RIGHT)
accounts = TextMobject("\\textbf{Accounts}")
accounts.shift(3*LEFT).to_edge(UP)
friendships = TextMobject("\\textbf{Friendships}")
friendships.shift(3*RIGHT).to_edge(UP)
lines = Mobject(
Line(UP*FRAME_Y_RADIUS, DOWN*FRAME_Y_RADIUS),
Line(LEFT*FRAME_X_RADIUS + 3*UP, RIGHT*FRAME_X_RADIUS + 3*UP)
).set_color("white")
self.add(accounts, friendships, lines)
self.wait()
for mob in names_mob, friends_mob:
self.play(ShowCreation(
mob, run_time=1.0
))
self.wait()
class ExamplesOfGraphs(GraphScene):
def construct(self):
buff = 0.5
self.graph.vertices = [v + DOWN + RIGHT for v in self.graph.vertices]
GraphScene.construct(self)
self.generate_regions()
objects, notions = Mobject(*TextMobject(
["Objects \\quad\\quad ", "Thing that connects objects"]
)).to_corner().shift(0.5*RIGHT).split()
horizontal_line = Line(
(-FRAME_X_RADIUS, FRAME_Y_RADIUS-1, 0),
(max(notions.points[:, 0]), FRAME_Y_RADIUS-1, 0)
)
vert_line_x_val = min(notions.points[:, 0]) - buff
vertical_line = Line(
(vert_line_x_val, FRAME_Y_RADIUS, 0),
(vert_line_x_val, -FRAME_Y_RADIUS, 0)
)
objects_and_notions = [
("Facebook accounts", "Friendship"),
("English Words", "Differ by One Letter"),
("Mathematicians", "Coauthorship"),
("Neurons", "Synapses"),
(
"Regions our graph \\\\ cuts the plane into",
"Shared edges"
)
]
self.clear()
self.add(objects, notions, horizontal_line, vertical_line)
for (obj, notion), height in zip(objects_and_notions, it.count(2, -1)):
obj_mob = TextMobject(obj, size="\\small").to_edge(LEFT)
not_mob = TextMobject(notion, size="\\small").to_edge(LEFT)
not_mob.shift((vert_line_x_val + FRAME_X_RADIUS)*RIGHT)
obj_mob.shift(height*UP)
not_mob.shift(height*UP)
if obj.startswith("Regions"):
self.handle_dual_graph(obj_mob, not_mob)
elif obj.startswith("English"):
self.handle_english_words(obj_mob, not_mob)
else:
self.add(obj_mob)
self.wait()
self.add(not_mob)
self.wait()
def handle_english_words(self, words1, words2):
words = list(map(TextMobject, ["graph", "grape", "gape", "gripe"]))
words[0].shift(RIGHT)
words[1].shift(3*RIGHT)
words[2].shift(3*RIGHT + 2*UP)
words[3].shift(3*RIGHT + 2*DOWN)
lines = [
Line(*pair)
for pair in [
(
words[0].get_center() + RIGHT*words[0].get_width()/2,
words[1].get_center() + LEFT*words[1].get_width()/2
), (
words[1].get_center() + UP*words[1].get_height()/2,
words[2].get_center() + DOWN*words[2].get_height()/2
), (
words[1].get_center() + DOWN*words[1].get_height()/2,
words[3].get_center() + UP*words[3].get_height()/2
)
]
]
comp_words = Mobject(*words)
comp_lines = Mobject(*lines)
self.add(words1)
self.play(ShowCreation(comp_words, run_time=1.0))
self.wait()
self.add(words2)
self.play(ShowCreation(comp_lines, run_time=1.0))
self.wait()
self.remove(comp_words, comp_lines)
def handle_dual_graph(self, words1, words2):
words1.set_color("yellow")
words2.set_color("yellow")
connected = TextMobject("Connected")
connected.set_color("lightgreen")
not_connected = TextMobject("Not Connected")
not_connected.set_color("red")
for mob in connected, not_connected:
mob.shift(self.points[3] + UP)
self.play(*[
ShowCreation(mob, run_time=1.0)
for mob in self.edges + self.vertices
])
self.wait()
for region in self.regions:
self.set_color_region(region)
self.add(words1)
self.wait()
self.reset_background()
self.add(words2)
region_pairs = it.combinations(self.graph.region_cycles, 2)
for x in range(6):
want_matching = (x % 2 == 0)
found = False
while True:
try:
cycle1, cycle2 = next(region_pairs)
except:
return
shared = set(cycle1).intersection(cycle2)
if len(shared) == 2 and want_matching:
break
if len(shared) != 2 and not want_matching:
break
for cycle in cycle1, cycle2:
index = self.graph.region_cycles.index(cycle)
self.set_color_region(self.regions[index])
if want_matching:
self.remove(not_connected)
self.add(connected)
tup = tuple(shared)
if tup not in self.graph.edges:
tup = tuple(reversed(tup))
edge = deepcopy(self.edges[self.graph.edges.index(tup)])
edge.set_color("red")
self.play(ShowCreation(edge), run_time=1.0)
self.wait()
self.remove(edge)
else:
self.remove(connected)
self.add(not_connected)
self.wait(2)
self.reset_background()
class DrawDualGraph(GraphScene):
def construct(self):
GraphScene.construct(self)
self.generate_regions()
self.generate_dual_graph()
region_mobs = [
ImageMobject(disp.paint_region(reg, self.background), invert=False)
for reg in self.regions
]
for region, mob in zip(self.regions, region_mobs):
self.set_color_region(region, mob.get_color())
outer_region = self.regions.pop()
outer_region_mob = region_mobs.pop()
outer_dual_vertex = self.dual_vertices.pop()
internal_edges = [e for e in self.dual_edges if abs(e.start[0]) < FRAME_X_RADIUS and
abs(e.end[0]) < FRAME_X_RADIUS and
abs(e.start[1]) < FRAME_Y_RADIUS and
abs(e.end[1]) < FRAME_Y_RADIUS]
external_edges = [
e for e in self.dual_edges if e not in internal_edges]
self.wait()
self.reset_background()
self.set_color_region(outer_region, outer_region_mob.get_color())
self.play(*[
Transform(reg_mob, dot)
for reg_mob, dot in zip(region_mobs, self.dual_vertices)
])
self.wait()
self.reset_background()
self.play(ApplyFunction(
lambda p: (FRAME_X_RADIUS + FRAME_Y_RADIUS)*p/get_norm(p),
outer_region_mob
))
self.wait()
for edges in internal_edges, external_edges:
self.play(*[
ShowCreation(edge, run_time=2.0)
for edge in edges
])
self.wait()
class EdgesAreTheSame(GraphScene):
def construct(self):
GraphScene.construct(self)
self.generate_dual_graph()
self.remove(*self.vertices)
self.add(*self.dual_edges)
self.wait()
self.play(*[
Transform(*pair, run_time=2.0)
for pair in zip(self.dual_edges, self.edges)
])
self.wait()
self.add(
TextMobject("""
(Or at least I would argue they should \\\\
be thought of as the same thing.)
""", size="\\small").to_edge(UP)
)
self.wait()
class ListOfCorrespondances(Scene):
def construct(self):
buff = 0.5
correspondances = [
["Regions cut out by", "Vertices of"],
["Edges of", "Edges of"],
["Cycles of", "Connected components of"],
["Connected components of", "Cycles of"],
["Spanning tree in", "Complement of spanning tree in"],
["", "Dual of"],
]
for corr in correspondances:
corr[0] += " original graph"
corr[1] += " dual graph"
arrow = TexMobject("\\leftrightarrow", size="\\large")
lines = []
for corr, height in zip(correspondances, it.count(3, -1)):
left = TextMobject(corr[0], size="\\small")
right = TextMobject(corr[1], size="\\small")
this_arrow = deepcopy(arrow)
for mob in left, right, this_arrow:
mob.shift(height*UP)
arrow_xs = this_arrow.points[:, 0]
left.to_edge(RIGHT)
left.shift((min(arrow_xs) - FRAME_X_RADIUS, 0, 0))
right.to_edge(LEFT)
right.shift((max(arrow_xs) + FRAME_X_RADIUS, 0, 0))
lines.append(Mobject(left, right, this_arrow))
last = None
for line in lines:
self.add(line.set_color("yellow"))
if last:
last.set_color("white")
last = line
self.wait(1)
class CyclesCorrespondWithConnectedComponents(GraphScene):
args_list = [(SampleGraph(),)]
def construct(self):
GraphScene.construct(self)
self.generate_regions()
self.generate_dual_graph()
cycle = [4, 2, 1, 5, 4]
enclosed_regions = [0, 2, 3, 4]
dual_cycle = DUAL_CYCLE
enclosed_vertices = [0, 1]
randy = Randolph()
randy.scale(RANDOLPH_SCALE_FACTOR)
randy.move_to(self.points[cycle[0]])
lines_to_remove = []
for last, next in zip(cycle, cycle[1:]):
line = Line(self.points[last], self.points[next])
line.set_color("yellow")
self.play(
ShowCreation(line),
WalkPiCreature(randy, self.points[next]),
run_time=1.0
)
lines_to_remove.append(line)
self.wait()
self.remove(randy, *lines_to_remove)
for region in np.array(self.regions)[enclosed_regions]:
self.set_color_region(region)
self.wait(2)
self.reset_background()
lines = Mobject(*[
Line(self.dual_points[last], self.dual_points[next])
for last, next in zip(dual_cycle, dual_cycle[1:])
]).set_color("red")
self.play(ShowCreation(lines))
self.play(*[
Transform(v, Dot(
v.get_center(),
radius=3*Dot.DEFAULT_RADIUS
).set_color("green"))
for v in np.array(self.vertices)[enclosed_vertices]
])
#!/usr/bin/env python
# CREATED:2014-01-18 14:09:05 by <NAME> <<EMAIL>>
# unit tests for util routines
# Disable cache
import os
try:
os.environ.pop("LIBROSA_CACHE_DIR")
except:
pass
import platform
import numpy as np
import scipy.sparse
import pytest
import warnings
import librosa
from test_core import srand
np.set_printoptions(precision=3)
def test_example_audio_file():
assert os.path.exists(librosa.util.example_audio_file())
@pytest.mark.parametrize("frame_length", [4, 8])
@pytest.mark.parametrize("hop_length", [2, 4])
@pytest.mark.parametrize("y", [np.random.randn(32)])
@pytest.mark.parametrize("axis", [0, -1])
def test_frame1d(frame_length, hop_length, axis, y):
y_frame = librosa.util.frame(y, frame_length=frame_length, hop_length=hop_length, axis=axis)
if axis == -1:
y_frame = y_frame.T
for i in range(y_frame.shape[0]):
assert np.allclose(y_frame[i], y[i * hop_length : (i * hop_length + frame_length)])
@pytest.mark.parametrize("frame_length", [4, 8])
@pytest.mark.parametrize("hop_length", [2, 4])
@pytest.mark.parametrize(
"y, axis", [(np.asfortranarray(np.random.randn(16, 32)), -1), (np.ascontiguousarray(np.random.randn(16, 32)), 0)]
)
def test_frame2d(frame_length, hop_length, axis, y):
y_frame = librosa.util.frame(y, frame_length=frame_length, hop_length=hop_length, axis=axis)
if axis == -1:
y_frame = y_frame.T
y = y.T
for i in range(y_frame.shape[0]):
assert np.allclose(y_frame[i], y[i * hop_length : (i * hop_length + frame_length)])
def test_frame_0stride():
x = np.arange(10)
xpad = x[np.newaxis]
xpad2 = np.atleast_2d(x)
xf = librosa.util.frame(x, 3, 1)
xfpad = librosa.util.frame(xpad, 3, 1)
xfpad2 = librosa.util.frame(xpad2, 3, 1)
assert np.allclose(xf, xfpad)
assert np.allclose(xf, xfpad2)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_frame_badtype():
librosa.util.frame([1, 2, 3, 4], frame_length=2, hop_length=1)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("x", [np.arange(16)])
def test_frame_too_short(x, axis):
librosa.util.frame(x, frame_length=17, hop_length=1, axis=axis)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_frame_bad_hop():
librosa.util.frame(np.arange(16), frame_length=4, hop_length=0)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("axis", [1, 2])
def test_frame_bad_axis(axis):
librosa.util.frame(np.zeros((3, 3, 3)), frame_length=2, hop_length=1, axis=axis)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("x, axis", [(np.zeros((4, 4), order="C"), -1), (np.zeros((4, 4), order="F"), 0)])
def test_frame_bad_contiguity(x, axis):
librosa.util.frame(x, frame_length=2, hop_length=1, axis=axis)
@pytest.mark.parametrize("y", [np.ones((16,)), np.ones((16, 16))])
@pytest.mark.parametrize("m", [0, 10])
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("mode", ["constant", "edge", "reflect"])
def test_pad_center(y, m, axis, mode):
n = m + y.shape[axis]
y_out = librosa.util.pad_center(y, n, axis=axis, mode=mode)
n_len = y.shape[axis]
n_pad = int((n - n_len) / 2)
eq_slice = [slice(None)] * y.ndim
eq_slice[axis] = slice(n_pad, n_pad + n_len)
assert np.allclose(y, y_out[tuple(eq_slice)])
@pytest.mark.parametrize("y", [np.ones((16,)), np.ones((16, 16))])
@pytest.mark.parametrize("n", [0, 10])
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("mode", ["constant", "edge", "reflect"])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_pad_center_fail(y, n, axis, mode):
librosa.util.pad_center(y, n, axis=axis, mode=mode)
@pytest.mark.parametrize("y", [np.ones((16,)), np.ones((16, 16))])
@pytest.mark.parametrize("m", [-5, 0, 5])
@pytest.mark.parametrize("axis", [0, -1])
def test_fix_length(y, m, axis):
n = m + y.shape[axis]
y_out = librosa.util.fix_length(y, n, axis=axis)
eq_slice = [slice(None)] * y.ndim
eq_slice[axis] = slice(y.shape[axis])
if n > y.shape[axis]:
assert np.allclose(y, y_out[tuple(eq_slice)])
else:
assert np.allclose(y[tuple(eq_slice)], y)
@pytest.mark.parametrize("frames", [np.arange(20, 100, step=15)])
@pytest.mark.parametrize("x_min", [0, 20])
@pytest.mark.parametrize("x_max", [20, 70, 120])
@pytest.mark.parametrize("pad", [False, True])
def test_fix_frames(frames, x_min, x_max, pad):
f_fix = librosa.util.fix_frames(frames, x_min=x_min, x_max=x_max, pad=pad)
if x_min is not None:
if pad:
assert f_fix[0] == x_min
assert np.all(f_fix >= x_min)
if x_max is not None:
if pad:
assert f_fix[-1] == x_max
assert np.all(f_fix <= x_max)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("frames", [np.arange(-20, 100)])
@pytest.mark.parametrize("x_min", [None, 0, 20])
@pytest.mark.parametrize("x_max", [None, 0, 20])
@pytest.mark.parametrize("pad", [False, True])
def test_fix_frames_fail_negative(frames, x_min, x_max, pad):
librosa.util.fix_frames(frames, x_min, x_max, pad)
@pytest.mark.parametrize("norm", [np.inf, -np.inf, 0, 0.5, 1.0, 2.0, None])
@pytest.mark.parametrize("ndims,axis", [(1, 0), (1, -1), (2, 0), (2, 1), (2, -1), (3, 0), (3, 1), (3, 2), (3, -1)])
def test_normalize(ndims, norm, axis):
srand()
X = np.random.randn(*([4] * ndims))
X_norm = librosa.util.normalize(X, norm=norm, axis=axis)
# Shape and dtype checks
assert X_norm.dtype == X.dtype
assert X_norm.shape == X.shape
if norm is None:
assert np.allclose(X, X_norm)
return
X_norm = np.abs(X_norm)
if norm == np.inf:
values = np.max(X_norm, axis=axis)
elif norm == -np.inf:
values = np.min(X_norm, axis=axis)
elif norm == 0:
# XXX: normalization here isn't quite right
values = np.ones(1)
# -*- coding: utf-8 -*-
"""
ICRF T-Resonator
Calculates the voltage and current and S11 for a given configuration
"""
import tresonator as T
import matplotlib.pyplot as plt
import numpy as np
f = 62.64e6 # Hz
P_in = 80e3 # W
# setup the initial resonator configuration, in which L_DUT and L_CEA
# are not the necessary optimum values
Lsc_DUT = 0.035 # m
Lsc_CEA = 0.027 # m
cfg = T.Configuration(f, P_in, Lsc_DUT, Lsc_CEA, additional_losses=1)
# Calculates the voltage and current along the transmission lines
L_CEA, L_DUT, V_CEA, V_DUT, I_CEA, I_DUT = cfg.voltage_current()
# Plotting V,I
fig, ax = plt.subplots(2,1, sharex=True)
ax[0].plot(-L_DUT, np.abs(V_DUT)/1e3, L_CEA, np.abs(V_CEA)/1e3, lw=2)
ax[0].set_ylim(0, 45)
ax[0].grid(True)
ax[0].set_xlim(min(-L_DUT), max(L_CEA))
ax[0].axvline(x=cfg.L_Vprobe_CEA_fromT, ls='--', color='gray', lw=3)
ax[0].axvline(x=-cfg.L_Vprobe_DUT_fromT, ls='--', color='gray', lw=3)
ax[0].set_ylabel('|V| [kV]', fontsize=14)
ax[1].plot(-L_DUT, np.abs(I_DUT), L_CEA, np.abs(I_CEA), lw=2)
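# The original script appears truncated here; a minimal, assumed completion to
# label and display the lower (current) panel. The axis labels are assumptions,
# not taken from the source:
ax[1].set_ylabel('|I| [A]', fontsize=14)
ax[1].set_xlabel('Distance from T junction [m]', fontsize=14)
ax[1].grid(True)
plt.show()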
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from models import SEM, GRUEvent, clear_sem
from scipy.stats import multivariate_normal
from scipy.special import logsumexp
def segment_video(event_sequence, sem_kwargs):
"""
:param event_sequence: (NxD np.array) the sequence of N event vectors in D dimensions
:param sem_kwargs: (dict) all of the parameters for SEM
:return:
"""
sem_model = SEM(**sem_kwargs)
sem_model.run(event_sequence, k=event_sequence.shape[0], leave_progress_bar=True)
log_posterior = sem_model.results.log_like + sem_model.results.log_prior
# clean up memory
clear_sem(sem_model)
sem_model = None
return log_posterior
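# Sketch of how segment_video is meant to be called. The feature array and the
# SEM/GRUEvent hyper-parameters shown here are placeholders, not values taken
# from this file:
#
#   x = ...  # (N, D) array of per-frame scene vectors
#   sem_kwargs = dict(lmda=1e4, alfa=1e-1, f_class=GRUEvent, f_opts=dict())
#   log_posterior = segment_video(x, sem_kwargs)
#   e_hat = np.argmax(log_posterior, axis=1)   # MAP event label per frame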
def bin_times(array, max_seconds, bin_size=1.0):
""" Helper function to learn the bin the subject data"""
cumulative_binned = [np.sum(array <= t0 * 1000) for t0 in np.arange(bin_size, max_seconds + bin_size, bin_size)]
binned = np.array(cumulative_binned)[1:] - np.array(cumulative_binned)[:-1]
binned = np.concatenate([[cumulative_binned[0]], binned])
return binned
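# Worked example of bin_times (values chosen for illustration): boundary times of
# 500 ms and 1500 ms with max_seconds=3 and bin_size=1.0 give one boundary in each
# of the first two one-second bins and none in the third:
#   bin_times(np.array([500.0, 1500.0]), 3, 1.0)  ->  array([1, 1, 0])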
def load_comparison_data(data, bin_size=1.0):
# Movie A is Saxaphone (185s long)
# Movie B is making a bed (336s long)
# Movie C is doing dishes (255s long)
# here, we'll collapse over all of the groups (old, young; warned, unwarned) for now
n_subjs = len(set(data.SubjNum))
sax_times = np.sort(list(set(data.loc[data.Movie == 'A', 'MS']))).astype(np.float32)
binned_sax = bin_times(sax_times, 185, bin_size) / float(n_subjs)
bed_times = np.sort(list(set(data.loc[data.Movie == 'B', 'MS']))).astype(np.float32)
binned_bed = bin_times(bed_times, 336, bin_size) / float(n_subjs)
dishes_times = np.sort(list(set(data.loc[data.Movie == 'C', 'MS']))).astype(np.float32)
binned_dishes = bin_times(dishes_times, 255, bin_size) / float(n_subjs)
return binned_sax, binned_bed, binned_dishes
def get_binned_boundary_prop(e_hat, log_post, bin_size=1.0, frequency=30.0):
"""
:param results: SEM.Results
:param bin_size: seconds
:param frequency: in Hz
:return:
"""
# normalize
log_post0 = log_post - np.tile(np.max(log_post, axis=1).reshape(-1, 1), (1, log_post.shape[1]))
log_post0 -= np.tile(logsumexp(log_post0, axis=1).reshape(-1, 1), (1, log_post.shape[1]))
boundary_probability = [0]
for ii in range(1, log_post0.shape[0]):
idx = list(range(log_post0.shape[1]))
idx.remove(e_hat[ii - 1])
boundary_probability.append(logsumexp(log_post0[ii, idx]))
boundary_probability = np.array(boundary_probability)
frame_time = np.arange(1, len(boundary_probability) + 1) / float(frequency)
index = np.arange(0, np.max(frame_time), bin_size)
boundary_probability_binned = []
for t in index:
boundary_probability_binned.append(
# note: this operation is equivalent to the log of the average boundary probability in the window
logsumexp(boundary_probability[(frame_time >= t) & (frame_time < (t + bin_size))]) - \
np.log(bin_size * 30.)
)
boundary_probability_binned = pd.Series(boundary_probability_binned, index=index)
return boundary_probability_binned
def get_binned_boundaries(e_hat, bin_size=1.0, frequency=30.0):
""" get the binned boundaries from the model"""
frame_time = np.arange(1, len(e_hat) + 1) / float(frequency)
index = np.arange(0, np.max(frame_time), bin_size)
boundaries = np.concatenate([[0], e_hat[1:] !=e_hat[:-1]])
boundaries_binned = []
for t in index:
boundaries_binned.append(np.sum(
boundaries[(frame_time >= t) & (frame_time < (t + bin_size))]
))
return np.array(boundaries_binned, dtype=bool)
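# Small illustration of get_binned_boundaries: with 30 Hz frames and bin_size=1.0,
# an event-label switch anywhere inside a one-second window marks that bin True:
#   e_hat = np.array([0]*45 + [1]*45)          # switch at frame 45 (t ~ 1.5 s)
#   get_binned_boundaries(e_hat, 1.0, 30.0)    # -> array([False,  True, False])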
def get_point_biserial(boundaries_binned, binned_comp):
M_1 = np.mean(binned_comp[boundaries_binned == 1])
M_0 = np.mean(binned_comp[boundaries_binned == 0])
n_1 = np.sum(boundaries_binned == 1)
n_0 = np.sum(boundaries_binned == 0)
n = n_1 + n_0
s = np.std(binned_comp)
r_pb = (M_1 - M_0) / s * np.sqrt(n_1 * n_0 / (float(n)**2))
return r_pb
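# Toy check of get_point_biserial: if model boundaries line up with the bins where
# subjects marked more boundaries, r_pb is positive (numbers are illustrative):
#   bounds = np.array([1, 0, 1, 0], dtype=bool)
#   comp   = np.array([0.6, 0.1, 0.5, 0.2])
#   get_point_biserial(bounds, comp)   # -> ~0.97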
def get_subjs_rpb(data, bin_size=1.0):
"""get the distribution of subjects' point bi-serial correlation coeffs"""
grouped_data = np.concatenate(load_comparison_data(data))
r_pbs = []
for sj in set(data.SubjNum):
_binned_sax = bin_times(data.loc[(data.SubjNum == sj) & (data.Movie == 'A'), 'MS'], 185, 1.0)
_binned_bed = bin_times(data.loc[(data.SubjNum == sj) & (data.Movie == 'B'), 'MS'], 336, 1.0)
_binned_dishes = bin_times(data.loc[(data.SubjNum == sj) & (data.Movie == 'C'), 'MS'], 255, 1.0)
subs = np.concatenate([_binned_sax, _binned_bed, _binned_dishes])
r_pbs.append(get_point_biserial(subs, grouped_data))
return r_pbs
def plot_boundaries(binned_subj_data, binned_model_bounds, label, batch=0):
# boundaries = get_binned_boundaries(log_poseterior)
# boundaries = binned_model_bounds
plt.figure(figsize=(4.5, 2.0))
plt.plot(binned_subj_data, label='Subject Boundaries')
plt.xlabel('Time (seconds)')
plt.ylabel('Boundary Probability')
b = np.arange(len(binned_model_bounds))[binned_model_bounds][0]
plt.plot([b, b], [0, 1], 'k:', label='Model Boundary', alpha=0.75)
for b in np.arange(len(binned_model_bounds))[binned_model_bounds][1:]:
plt.plot([b, b], [0, 1], 'k:', alpha=0.75)
plt.legend(loc='upper right', framealpha=1.0)
plt.ylim([0, 0.6])
plt.title('"' + label + '"')
sns.despine()
plt.savefig('video_segmentation_{}_batch_{}.png'.format(label.replace(" ", ""), batch),
dpi=600, bbox_inches='tight')
def convert_type_token(event_types):
tokens = [0]
for ii in range(len(event_types)-1):
if event_types[ii] == event_types[ii+1]:
tokens.append(tokens[-1])
else:
tokens.append(tokens[-1] + 1)
return tokens
def get_event_duration(event_types, frequency=30):
tokens = convert_type_token(event_types)
n_tokens = np.max(tokens)+1
lens = []
for ii in range(n_tokens):
lens.append(np.sum(np.array(tokens) == ii))
return np.array(lens, dtype=float) / frequency
from __future__ import absolute_import
from . import _agglo as __agglo
from ._agglo import *
import numpy
__all__ = []
for key in __agglo.__dict__.keys():
__all__.append(key)
try:
__agglo.__dict__[key].__module__='nifty.graph.agglo'
except:
pass
from ...tools import makeDense as __makeDense
def updateRule(name, **kwargs):
if name in ['max', 'single_linkage']:
return MaxSettings()
elif name in ['mutex_watershed', 'abs_max']:
return MutexWatershedSettings()
elif name in ['min', 'complete_linkage']:
return MinSettings()
elif name == 'sum':
return SumSettings()
elif name in ['mean', 'average', 'avg']:
return ArithmeticMeanSettings()
elif name in ['gmean', 'generalized_mean']:
p = kwargs.get('p',1.0)
return GeneralizedMeanSettings(p=float(p))
elif name in ['smax', 'smooth_max']:
p = kwargs.get('p',0.0)
return SmoothMaxSettings(p=float(p))
elif name in ['rank','quantile', 'rank_order']:
q = kwargs.get('q',0.5)
numberOfBins = kwargs.get('numberOfBins',40)
return RankOrderSettings(q=float(q), numberOfBins=int(numberOfBins))
else:
        raise NotImplementedError("not yet implemented")
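# Illustrative examples of the mapping above (not part of the original module):
#   updateRule('mean') -> ArithmeticMeanSettings()
#   updateRule('gmean', p=2.0) -> GeneralizedMeanSettings(p=2.0)
#   updateRule('rank', q=0.5, numberOfBins=40) -> RankOrderSettings(q=0.5, numberOfBins=40)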
def get_GASP_policy(graph,
signed_edge_weights,
linkage_criteria = 'mean',
linkage_criteria_kwargs = None,
add_cannot_link_constraints= False,
edge_sizes = None,
is_mergeable_edge = None,
node_sizes = None,
size_regularizer = 0.0,
number_of_nodes_to_stop = 1,
merge_constrained_edges_at_the_end=False,
collect_stats_for_exported_data=False
):
linkage_criteria_kwargs = {} if linkage_criteria_kwargs is None else linkage_criteria_kwargs
parsed_rule = updateRule(linkage_criteria, **linkage_criteria_kwargs)
edge_sizes = numpy.ones_like(signed_edge_weights) if edge_sizes is None else edge_sizes
    is_mergeable_edge = numpy.ones_like(signed_edge_weights) if is_mergeable_edge is None else is_mergeable_edge
from collections import OrderedDict
import numpy as np
import collections
from copy import deepcopy
import random
import robosuite.utils.transform_utils as T
from robosuite.utils.mjcf_utils import CustomMaterial, array_to_string, find_elements, new_site
from robosuite.utils.mjcf_utils import CustomMaterial
from robosuite.environments.manipulation.single_arm_env import SingleArmEnv
from robosuite.models.arenas import TableArena
from robosuite.models.objects import BoxObject, CylinderObject, PlateWithHoleObject
from robosuite.models.tasks import ManipulationTask
from robosuite.utils.placement_samplers import UniformRandomSampler
from robosuite.utils.observables import Observable, sensor
class Lift(SingleArmEnv):
"""
This class corresponds to the lifting task for a single robot arm.
Args:
robots (str or list of str): Specification for specific robot arm(s) to be instantiated within this env
(e.g: "Sawyer" would generate one arm; ["Panda", "Panda", "Sawyer"] would generate three robot arms)
Note: Must be a single single-arm robot!
env_configuration (str): Specifies how to position the robots within the environment (default is "default").
For most single arm environments, this argument has no impact on the robot setup.
controller_configs (str or list of dict): If set, contains relevant controller parameters for creating a
custom controller. Else, uses the default controller for this specific task. Should either be single
dict if same controller is to be used for all robots or else it should be a list of the same length as
"robots" param
gripper_types (str or list of str): type of gripper, used to instantiate
gripper models from gripper factory. Default is "default", which is the default grippers(s) associated
with the robot(s) the 'robots' specification. None removes the gripper, and any other (valid) model
overrides the default gripper. Should either be single str if same gripper type is to be used for all
robots or else it should be a list of the same length as "robots" param
initialization_noise (dict or list of dict): Dict containing the initialization noise parameters.
The expected keys and corresponding value types are specified below:
:`'magnitude'`: The scale factor of uni-variate random noise applied to each of a robot's given initial
joint positions. Setting this value to `None` or 0.0 results in no noise being applied.
If "gaussian" type of noise is applied then this magnitude scales the standard deviation applied,
If "uniform" type of noise is applied then this magnitude sets the bounds of the sampling range
:`'type'`: Type of noise to apply. Can either specify "gaussian" or "uniform"
Should either be single dict if same noise value is to be used for all robots or else it should be a
list of the same length as "robots" param
:Note: Specifying "default" will automatically use the default noise settings.
Specifying None will automatically create the required dict with "magnitude" set to 0.0.
table_full_size (3-tuple): x, y, and z dimensions of the table.
table_friction (3-tuple): the three mujoco friction parameters for
the table.
use_camera_obs (bool): if True, every observation includes rendered image(s)
use_object_obs (bool): if True, include object (cube) information in
the observation.
reward_scale (None or float): Scales the normalized reward function by the amount specified.
If None, environment reward remains unnormalized
reward_shaping (bool): if True, use dense rewards.
placement_initializer (ObjectPositionSampler): if provided, will
be used to place objects on every reset, else a UniformRandomSampler
is used by default.
has_renderer (bool): If true, render the simulation state in
a viewer instead of headless mode.
has_offscreen_renderer (bool): True if using off-screen rendering
render_camera (str): Name of camera to render if `has_renderer` is True. Setting this value to 'None'
will result in the default angle being applied, which is useful as it can be dragged / panned by
the user using the mouse
render_collision_mesh (bool): True if rendering collision meshes in camera. False otherwise.
render_visual_mesh (bool): True if rendering visual meshes in camera. False otherwise.
render_gpu_device_id (int): corresponds to the GPU device id to use for offscreen rendering.
Defaults to -1, in which case the device will be inferred from environment variables
(GPUS or CUDA_VISIBLE_DEVICES).
control_freq (float): how many control signals to receive in every second. This sets the amount of
simulation time that passes between every action input.
horizon (int): Every episode lasts for exactly @horizon timesteps.
ignore_done (bool): True if never terminating the environment (ignore @horizon).
hard_reset (bool): If True, re-loads model, sim, and render object upon a reset call, else,
only calls sim.reset and resets all robosuite-internal variables
camera_names (str or list of str): name of camera to be rendered. Should either be single str if
same name is to be used for all cameras' rendering or else it should be a list of cameras to render.
:Note: At least one camera must be specified if @use_camera_obs is True.
:Note: To render all robots' cameras of a certain type (e.g.: "robotview" or "eye_in_hand"), use the
convention "all-{name}" (e.g.: "all-robotview") to automatically render all camera images from each
robot's camera list).
camera_heights (int or list of int): height of camera frame. Should either be single int if
same height is to be used for all cameras' frames or else it should be a list of the same length as
"camera names" param.
camera_widths (int or list of int): width of camera frame. Should either be single int if
same width is to be used for all cameras' frames or else it should be a list of the same length as
"camera names" param.
camera_depths (bool or list of bool): True if rendering RGB-D, and RGB otherwise. Should either be single
bool if same depth setting is to be used for all cameras or else it should be a list of the same length as
"camera names" param.
Raises:
AssertionError: [Invalid number of robots specified]
"""
def __init__(
self,
robots,
env_configuration="default",
controller_configs=None,
gripper_types="default",
initialization_noise="default",
table_full_size=(0.8, 0.8, 0.05),
table_friction=(1., 5e-3, 1e-4),
use_camera_obs=True,
use_object_obs=True,
reward_scale=1.0,
reward_shaping=False,
placement_initializer=None,
has_renderer=False,
has_offscreen_renderer=True,
render_camera="frontview",
render_collision_mesh=False,
render_visual_mesh=True,
render_gpu_device_id=-1,
control_freq=20,
horizon=1000,
ignore_done=False,
hard_reset=True,
camera_names="agentview",
camera_heights=256,
camera_widths=256,
camera_depths=False,
num_via_point=0,
dist_error=0.002,
angle_error=0,
tanh_value=2.0,
r_reach_value=0.94,
error_type='circle',
control_spec=36,
peg_radius=(0.0025, 0.0025), # (0.00125, 0.00125)
peg_length=0.12,
):
#min jerk param:
self.num_via_point = num_via_point
# settings for table top
self.via_point = OrderedDict()
self.table_full_size = table_full_size
self.table_friction = table_friction
self.table_offset = np.array((0, 0, 0.8))
# Save peg specs
self.peg_radius = peg_radius
self.peg_length = peg_length
self.dist_error = dist_error
self.angle_error = angle_error
# reward configuration
self.reward_scale = reward_scale
self.reward_shaping = reward_shaping
# whether to use ground-truth object states
self.use_object_obs = use_object_obs
# object placement initializer
self.placement_initializer = placement_initializer
super().__init__(
robots=robots,
env_configuration=env_configuration,
controller_configs=controller_configs,
mount_types="default",
gripper_types=gripper_types,
initialization_noise=initialization_noise,
use_camera_obs=use_camera_obs,
has_renderer=has_renderer,
has_offscreen_renderer=has_offscreen_renderer,
render_camera=render_camera,
render_collision_mesh=render_collision_mesh,
render_visual_mesh=render_visual_mesh,
render_gpu_device_id=render_gpu_device_id,
control_freq=control_freq,
horizon=horizon,
ignore_done=ignore_done,
hard_reset=hard_reset,
camera_names=camera_names,
camera_heights=camera_heights,
camera_widths=camera_widths,
camera_depths=camera_depths,
dist_error=dist_error,
tanh_value=tanh_value,
r_reach_value=r_reach_value,
error_type=error_type,
control_spec=control_spec,
)
def reward(self, action=None):
"""
Reward function for the task.
Sparse un-normalized reward:
- a discrete reward of 100.0 is provided if the peg is inside the plate's hole
- Note that we enforce that it's inside at an appropriate angle (cos(theta) > 0.95).
Un-normalized summed components if using reward shaping:
- ????
Note that the final reward is normalized and scaled by reward_scale / 5.0 as
well so that the max score is equal to reward_scale
"""
# TODO - reward(self, action=None) - change this function
reward = 0
time_factor = (self.horizon - self.timestep) / self.horizon
# Right location and angle
if self._check_success() and self.num_via_point == 1:
# reward = self.horizon * time_factor
self.success += 1
if self.success == 2:
S = 1
return reward
# use a shaping reward
if self.reward_shaping:
# Grab relevant values
t, d, cos = self._compute_orientation()
# Reach a terminal state as quickly as possible
# reaching reward
reward += self.r_reach * 5 * cos # * time_factor
# Orientation reward
reward += self.hor_dist
# reward += 1 - np.tanh(2.0*d)
# reward += 1 - np.tanh(np.abs(t))
reward += cos
# if we're not reward shaping, we need to scale our sparse reward so that the max reward is identical
# to its dense version
else:
reward *= 5.0
if self.reward_scale is not None:
reward *= self.reward_scale
if (self.num_via_point == 1
and ((abs(self.hole_pos[0] - self.peg_pos[0]) > 0.014
or abs(self.hole_pos[1] - self.peg_pos[1]) > 0.014)
and self.peg_pos[2] < self.table_offset[2] + 0.1)
or self.horizon - self.timestep == 1
):
reward = 0 * -self.horizon / 3
# self.checked = (self.num_via_points-2)
# self.switch = 0
# self.switch_seq = 0
# self.success = 0
# # self.trans *= 3
# self.reset_via_point()
# self.built_min_jerk_traj()
return reward
def on_peg(self):
res = False
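        # The peg counts as inserted when its tip is within ~1.5 cm of the hole
        # center in x, ~0.7 cm in y, less than 4 cm in combined |dx| + |dy|, and
        # no more than 5 cm above the table top (thresholds from the check below).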
if (
abs(self.hole_pos[0] - self.peg_pos[0]) < 0.015
and abs(self.hole_pos[1] - self.peg_pos[1]) < 0.007
and abs(self.hole_pos[1] - self.peg_pos[1]) + abs(self.hole_pos[0] - self.peg_pos[0]) < 0.04
and self.peg_pos[2] < self.table_offset[2] + 0.05
):
res = True
return res
def _load_model(self):
"""
Loads an xml model, puts it in self.model
"""
super()._load_model()
# Adjust base pose accordingly
xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0])
self.robots[0].robot_model.set_base_xpos(xpos)
# load model for table top workspace
mujoco_arena = TableArena(
table_full_size=self.table_full_size,
table_friction=self.table_friction,
table_offset=self.table_offset,
)
# Arena always gets set to zero origin
mujoco_arena.set_origin([0, 0, 0])
self.peg_radius = 0.025
self.peg_height = 0.12
self.peg_z_offset = 0.9
self.rotation = None
x_range = [-0.0, 0.0]
y_range = [-0.1, -0.1]
# initialize objects of interest
self.peg = CylinderObject(name='peg',
size=[self.peg_radius, self.peg_height],
density=1,
duplicate_collision_geoms=True,
rgba=[1, 0, 0, 1], joints=None)
# load peg object (returns extracted object in XML form)
peg_obj = self.peg.get_obj()
# set pegs position relative to place where it is being placed
peg_obj.set("pos", array_to_string((0, 0, -0.04)))
peg_obj.append(new_site(name="peg_site", pos=(0, 0, self.peg_height), size=(0.005,)))
# append the object top the gripper (attach body to body)
# main_eef = self.robots[0].robot_model.eef_name # 'robot0_right_hand'
main_eef = self.robots[0].gripper.bodies[1] # 'gripper0_eef' body
main_model = self.robots[0].robot_model # <robosuite.models.robots.manipulators.ur5e_robot.UR5e at 0x7fd9ead87ca0>
main_body = find_elements(root=main_model.worldbody, tags="body", attribs={"name": main_eef}, return_first=True)
main_body.append(peg_obj) # attach body to body
if self.rotation is None:
rot_angle = np.random.uniform(high=2 * np.pi, low=0)
elif isinstance(self.rotation, collections.Iterable):
rot_angle = np.random.uniform(
high=max(self.rotation), low=min(self.rotation)
)
else:
rot_angle = self.rotation
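        # Quaternion in MuJoCo's (w, x, y, z) convention for a rotation of
        # rot_angle about the z-axis.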
hole_rot_set = str(np.array([np.cos(rot_angle / 2), 0, 0, np.sin(rot_angle / 2)]))
hole_pos_set = np.array([np.random.uniform(high=x_range[0], low=x_range[1]), np.random.uniform(high=y_range[0], low=y_range[1]), 0.83])
hole_pos_str = ' '.join(map(str, hole_pos_set))
hole_rot_str = ' '.join(map(str, hole_rot_set))
self.hole = PlateWithHoleObject(name='hole')
hole_obj = self.hole.get_obj()
hole_obj.set("quat", hole_rot_str)
hole_obj.set("pos", hole_pos_str)
self.model = ManipulationTask(
mujoco_arena=mujoco_arena,
mujoco_robots=[robot.robot_model for robot in self.robots],
mujoco_objects=self.hole
)
# Make sure to add relevant assets from peg and hole objects
self.model.merge_assets(self.peg)
## Create placement initializer
# if self.placement_initializer is not None:
# self.placement_initializer.reset()
# self.placement_initializer.add_objects(self.peg)
# else:
# """Object samplers use the bottom_site and top_site sites of each object in order to place objects on top of other objects,
# and the horizontal_radius_site site in order to ensure that objects do not collide with one another. """
# task includes arena, robot, and objects of interest
# self.model = ManipulationTask(
# mujoco_arena=mujoco_arena,
# mujoco_robots=[robot.robot_model for robot in self.robots],
# mujoco_objects=[self.peg],
# )
# def _load_model(self):
# """
# Loads an xml model, puts it in self.model
# """
# super()._load_model()
#
# # Adjust base pose accordingly
# xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0])
# self.robots[0].robot_model.set_base_xpos(xpos)
#
# # load model for table top workspace
# mujoco_arena = TableArena(
# table_full_size=self.table_full_size,
# table_friction=self.table_friction,
# table_offset=self.table_offset,
# )
#
# # Arena always gets set to zero origin
# mujoco_arena.set_origin([0, 0, 0])
#
# # initialize objects of interest
# tex_attrib = {
# "type": "cube",
# }
# mat_attrib = {
# "texrepeat": "1 1",
# "specular": "0.4",
# "shininess": "0.1",
# }
# redwood = CustomMaterial(
# texture="WoodRed",
# tex_name="redwood",
# mat_name="redwood_mat",
# tex_attrib=tex_attrib,
# mat_attrib=mat_attrib,
# )
# # self.cube = BoxObject(
# # name="cube",
# # size_min=[0.020, 0.020, 0.020], # [0.015, 0.015, 0.015],
# # size_max=[0.022, 0.022, 0.022], # [0.018, 0.018, 0.018])
# # rgba=[1, 0, 0, 1],
# # material=redwood,
# # )
# self.cube = PlateWithHoleObject(name="cube")
# # Create placement initializer
# if self.placement_initializer is not None:
# self.placement_initializer.reset()
# self.placement_initializer.add_objects(self.cube)
# else:
# self.placement_initializer = UniformRandomSampler(
# name="cube",
# mujoco_objects=self.cube,
# x_range=[-0.03, 0.03],
# y_range=[-0.03, 0.03],
# rotation=None,
# ensure_object_boundary_in_range=True,
# ensure_valid_placement=True,
# reference_pos=self.table_offset,
# z_offset=0.01,
# )
#
# self.placement_initializer.reset()
#
#
    # # Add this nut to the placement initializer
# self.placement_initializer.add_objects(self.cube)
# # task includes arena, robot, and objects of interest
# # self.hole = PlateWithHoleObject(name='hole',)
# # self.hole = PlateWith5mmHoleObject(name='peg_hole')
# # hole_obj = self.hole.get_obj()
# # hole_obj.set("quat", "0 0 0.707 0.707")
# # hole_obj.set("pos", "0.1 0.2 1.17")
#
# self.model = ManipulationTask(
# mujoco_arena=mujoco_arena,
# mujoco_robots=[robot.robot_model for robot in self.robots],
# mujoco_objects=self.cube,
# )
def _setup_references(self):
"""
Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
in a flatten array, which is how MuJoCo stores physical simulation data.
"""
super()._setup_references()
# Additional object references from this env
self.peg_body_id = self.sim.model.body_name2id(self.peg.root_body)
def _setup_observables(self):
"""
Sets up observables to be used for this environment. Creates object-based observables if enabled
Returns:
OrderedDict: Dictionary mapping observable names to its corresponding Observable object
"""
observables = super()._setup_observables()
# low-level object information
if self.use_object_obs:
# Get robot prefix and define observables modality
pf = self.robots[0].robot_model.naming_prefix
modality = "object"
# peg-related observables
@sensor(modality=modality)
def peg_pos(obs_cache):
return np.array(self.sim.data.body_xpos[self.peg_body_id])
@sensor(modality=modality)
def peg_quat(obs_cache):
return T.convert_quat(np.array(self.sim.data.body_xquat[self.peg_body_id]), to="xyzw")
@sensor(modality=modality)
def gripper_to_peg_pos(obs_cache):
return obs_cache[f"{pf}eef_pos"] - obs_cache["peg_pos"] if \
f"{pf}eef_pos" in obs_cache and "peg_pos" in obs_cache else np.zeros(3)
sensors = [peg_pos, peg_quat, gripper_to_peg_pos]
names = [s.__name__ for s in sensors]
# Create observables
for name, s in zip(names, sensors):
observables[name] = Observable(
name=name,
sensor=s,
sampling_rate=self.control_freq,
)
return observables
def _reset_internal(self):
"""
Resets simulation internal configurations.
"""
super()._reset_internal()
self.num_via_point = 0
self.success = 0
self.enter = 1
self.t_bias = 0
self.reset_via_point()
# Reset all object positions using initializer sampler if we're not directly loading from an xml
# if not self.deterministic_reset:
# Sample from the placement initializer for all objects
# object_placements = self.placement_initializer.sample()
#
# # Loop through all objects and reset their positions
# for obj_pos, obj_quat, obj in object_placements.values():
# self.sim.data.set_joint_qpos(obj.joints, np.concatenate([np.array(obj_pos), np.array(obj_quat)]))
def visualize(self, vis_settings):
"""
In addition to super call, visualize gripper site proportional to the distance to the peg.
Args:
vis_settings (dict): Visualization keywords mapped to T/F, determining whether that specific
component should be visualized. Should have "grippers" keyword as well as any other relevant
options specified.
"""
# Run superclass method first
super().visualize(vis_settings=vis_settings)
# Color the gripper visualization site according to its distance to the peg
if vis_settings["grippers"]:
self._visualize_gripper_to_target(gripper=self.robots[0].gripper, target=self.peg)
def _check_success(self):
"""
Check if peg is successfully aligned and placed within the hole
Returns:
bool: True if peg is placed in hole correctly
"""
# TODO - _check_success(self) - change this function
        # calculate the peg's end position.
self.r_reach = 0
self.hor_dist = 0
peg_mat = self.sim.data.body_xmat[self.peg_body_id]
peg_mat.shape = (3, 3)
peg_pos_center = self.sim.data.body_xpos[self.peg_body_id]
handquat = T.convert_quat(self.sim.data.get_body_xquat("robot0_right_hand"), to="xyzw")
handDCM = T.quat2mat(handquat)
self.peg_pos = self.sim.data.get_site_xpos(
"peg_site") # peg_pos_center + (handDCM @ [0, 0, 2*self.peg_length]).T
self.hole_pos = self.sim.data.get_site_xpos("hole_middle_cylinder")
hole_mat = self.sim.data.body_xmat[self.sim.model.body_name2id("hole_hole")]
hole_mat.shape = (3, 3)
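        # Euclidean distance from the peg tip to the hole center; the (x, y)
        # component below measures lateral misalignment only.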
dist = np.linalg.norm(self.peg_pos - self.hole_pos)
        horizon_dist = np.linalg.norm(self.peg_pos[:2] - self.hole_pos[:2])
# encoding=utf8
"""Factory test case module."""
from unittest import TestCase
import numpy as np
from WeOptPy import Factory
from WeOptPy.task.interfaces import UtilityFunction
class NoLimits:
@classmethod
def function(cls):
def evaluate(D, x): return 0
return evaluate
class MyBenchmark(UtilityFunction):
def __init__(self):
UtilityFunction.__init__(self, -10, 10)
def function(self):
        return lambda D, x, **kwargs: np.sum(x ** 2)
import numpy as np
import os
import cv2 as cv
import matplotlib.pyplot as plt
import scipy.io as sio
import FaceDataIO as fdio
import tensortoolbox as ttl
from tensorly.decomposition import parafac
################################### Improved Method ###################################
def run():
database=np.load('TensorfaceParas.npy')[0]
if database=='yaleB':
        ##################################################### Training set
        [subs, poses, illums] = np.load('trainSet_paras.npy')
"""Test schmidt_decomposition."""
import numpy as np
from toqito.state_ops import schmidt_decomposition
from toqito.states import max_entangled
def test_schmidt_decomp_max_ent():
"""Schmidt decomposition of the 3-D maximally entangled state."""
singular_vals, u_mat, vt_mat = schmidt_decomposition(max_entangled(3))
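    # For the 3-dimensional maximally entangled state all Schmidt coefficients
    # equal 1/sqrt(3) and both local bases are the computational basis, which is
    # what the expected values below encode.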
expected_u_mat = np.identity(3)
expected_vt_mat = np.identity(3)
expected_singular_vals = 1 / np.sqrt(3) * np.array([[1], [1], [1]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
def test_schmidt_decomp_dim_list():
"""Schmidt decomposition with list specifying dimension."""
singular_vals, u_mat, vt_mat = schmidt_decomposition(max_entangled(3), dim=[3, 3])
expected_u_mat = np.identity(3)
expected_vt_mat = np.identity(3)
expected_singular_vals = 1 / np.sqrt(3) * np.array([[1], [1], [1]])
bool_mat = np.isclose(expected_u_mat, u_mat)
    np.testing.assert_equal(np.all(bool_mat), True)
from lightweaver.fal import Falc82
from lightweaver.rh_atoms import H_6_atom, H_6_CRD_atom, H_3_atom, C_atom, O_atom, OI_ord_atom, Si_atom, Al_atom, CaII_atom, Fe_atom, FeI_atom, He_9_atom, He_atom, He_large_atom, MgII_atom, N_atom, Na_atom, S_atom
import lightweaver as lw
from dataclasses import dataclass
import matplotlib.pyplot as plt
from copy import deepcopy
import time
import pickle
import numpy as np
from concurrent.futures import ProcessPoolExecutor, wait
from tqdm import tqdm
from lightweaver.utils import NgOptions, get_default_molecule_path
from astropy.io import fits
from lightweaver.LwCompiled import BackgroundProvider
from lightweaver.witt import witt
class WittmanBackground(BackgroundProvider):
"""
Uses the background from the Wittmann EOS, ignores scattering (i.e. sets to 0)
"""
def __init__(self, eqPops, radSet, wavelength):
self.eqPops = eqPops
self.radSet = radSet
self.wavelength = wavelength
def compute_background(self, atmos, chi, eta, sca):
abundance = self.eqPops.abundance
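        # Collect elemental abundances in periodic-table order for the Wittmann
        # EOS; per the class docstring, scattering is ignored (set to zero).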
        wittAbundances = np.array([abundance[e] for e in lw.PeriodicTable.elements])
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
eval_models.py
Evaluation of models for those problems on a validation set.
"""
from __future__ import print_function, division
import torch
import scipy.linalg
import sys, os, time
import numpy as np
import matplotlib.pyplot as plt
import glob
import numba
import cPickle as pkl
from pyLib.io import getArgs
from pyLib.train import MoMNet, modelLoader, GaoNet
from pyLib.math import l1loss
import util
DEBUG = False
def main():
# pen, car, drone still means which problem we want to look into
# pcakmean specifies the clustering approach we intend to use
# error means we calculate evaluation error on the validation set
# constr means we evaluate constraint violation
# eval just evaluates data on validation set and save into a npz file for rollout validation
# snn means we evaluate SNN network
# roll means look into rollout results and extract useful information. It turns out they all fail, somehow
args = util.get_args('debug', 'error', 'constr', 'eval', 'snn', 'roll')
global DEBUG
if args.debug:
DEBUG = True
cfg, lbl_name = util.get_label_cfg_by_args(args)
if args.error:
eval_valid_error(cfg, lbl_name, args)
if args.constr:
eval_valid_constr_vio(cfg, lbl_name, args)
if args.eval:
eval_on_valid(cfg, lbl_name, args)
if args.roll:
check_rollout_results(cfg, lbl_name, args)
def check_rollout_results(cfg, lbl_name, args):
"""Check rollout results"""
uid = cfg['uniqueid']
if args.snn:
datanm = 'data/%s/snn_rollout_result.pkl' % uid
with open(datanm, 'rb') as f:
Rst = pkl.load(f)
keys = Rst.keys()
print(keys)
if args.dtwo or args.done or args.drone:
# load flag of validation set, if necessary
vdata = np.load(cfg['valid_path'])
vio = Rst['vio']
if 'flag' in vdata.keys():
mask = vdata['flag'] == 1
else:
mask = np.ones(vio.shape[0], dtype=bool)
print('valid set mask size ', np.sum(mask))
vio = vio[mask]
fig, ax = plt.subplots()
ax.hist(vio, bins=20)
plt.show()
print('mean vio ', np.sum(vio[vio < 0]) / vio.shape[0])
print('max vio ', np.amin(vio))
else:
datanm = 'data/%s/%s_rollout_result.pkl' % (uid, lbl_name)
datanm = datanm.replace('_label', '')
with open(datanm, 'rb') as f:
Rst = pkl.load(f)
if args.pen or args.car:
keys = Rst.keys()
keys.sort(key=int)
for key in keys:
print('key = ', key)
key_rst = Rst[key]
if args.pen:
status = np.array([tmp['status'] for tmp in key_rst])
print(np.sum(status == 1))
elif args.car:
vXf = np.array([rst['statef'] for rst in key_rst])
# fix for angle
vXf[:, 2] = np.mod(vXf[:, 2], 2*np.pi)
inds = vXf[:, 2] > np.pi
vXf[inds, 2] = 2 * np.pi - vXf[inds, 2]
normXf = np.linalg.norm(vXf, axis=1)
print(np.sum(normXf < 0.5))
elif args.dtwo or args.done or args.drone:
            vio = Rst['vio']
vdata = np.load(cfg['valid_path'])
if 'flag' in vdata.keys():
mask = vdata['flag'] == 1
else:
                mask = np.ones(vio.shape[0], dtype=bool)
from . import kepmsg, kepstat
import math
import numpy as np
from matplotlib import pyplot as plt
def location(shape):
"""shape the window, enforce absolute scaling, rotate the labels"""
# position first axes inside the plotting window
ax = plt.axes(shape)
# force tick labels to be absolute rather than relative
plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
# rotate y labels by 90 deg
labels = ax.get_yticklabels()
return ax
def plot1d(x, y, cadence, lcolor, lwidth, fcolor, falpha, underfill):
"""plot a 1d distribution"""
# pad first and last points in case a fill is required
x = np.insert(x, [0], [x[0]])
x = np.append(x, [x[-1]])
y = np.insert(y, [0], [-1.0e10])
y = np.append(y, -1.0e10)
# plot data so that data gaps are not spanned by a line
ltime = np.array([], dtype='float64')
ldata = np.array([], dtype='float32')
for i in range(1, len(x)-1):
if x[i] - x[i - 1] < 2.0 * cadence / 86400:
ltime = np.append(ltime, x[i])
ldata = np.append(ldata, y[i])
else:
plt.plot(ltime, ldata, color=lcolor, linestyle='-',
linewidth=lwidth)
ltime = np.array([], dtype='float64')
ldata = np.array([], dtype='float32')
plt.plot(ltime, ldata, color=lcolor, linestyle='-', linewidth=lwidth)
# plot the fill color below data time series, with no data gaps
if underfill:
plt.fill(x, y, fc=fcolor, linewidth=0.0, alpha=falpha)
def RangeOfPlot(x, y, pad, origin):
"""determine data limits"""
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
xr = xmax - xmin
yr = ymax - ymin
plt.xlim(xmin - xr * pad, xmax + xr * pad)
plt.ylim(ymin - yr * pad, ymax + yr * pad)
if origin:
if ymin - yr * pad <= 0.0:
plt.ylim(1.0e-10, ymax + yr * pad)
else:
plt.ylim(ymin - yr * pad, ymax + yr * pad)
def cleanx(time, logfile, verbose):
"""clean up x-axis of plot"""
try:
time0 = float(int(time[0] / 100) * 100.0)
if time0 < 2.4e6:
time0 += 2.4e6
timeout = time - time0
label = "BJD $-$ {}".format(time0)
except:
txt = ("ERROR -- KEPPLOT.CLEANX: cannot calculate plot scaling in "
"x dimension")
kepmsg.err(logfile, txt, verbose)
return timeout, label
def cleany(signal, cadence, logfile, verbose):
"""clean up y-axis of plot"""
try:
signal /= cadence
nrm = math.ceil(math.log10(np.nanmax(signal))) - 1.0
signal = signal / 10 ** nrm
if nrm == 0:
label = 'Flux (e$^-$ s$^{-1}$)'
else:
label = "Flux ($10^%d$" % nrm + "e$^-$ s$^{-1}$)"
except:
txt = ("ERROR -- KEPPLOT.CLEANY: cannot calculate plot scaling in "
"y dimension")
kepmsg.err(logfile, txt, verbose)
return signal, label
def limits(x, y, logfile, verbose):
"""plot limits"""
try:
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
xr = xmax - xmin
yr = ymax - ymin
x = np.insert(x, [0], [x[0]])
x = np.append(x, [x[-1]])
y = np.insert(y, [0], [0.0])
y = np.append(y, 0.0)
except:
txt = 'ERROR -- KEPPLOT.LIMITS: cannot calculate plot limits'
kepmsg.err(logfile, txt, verbose)
return x, y, xmin, xmax, xr, ymin, ymax, yr
def labels(xlab, ylab, labcol, fs):
"""plot labels"""
plt.xlabel(xlab, fontsize=fs, color=labcol)
plt.ylabel(ylab, fontsize=fs, color=labcol)
def intScale1D(image, imscale):
"""intensity scale limits of 1d array"""
nstat = 2; work2 = []
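    # Robust display limits: zmin/zmax are medians of the lowest and highest
    # ~10% of the finite, sorted pixel values (at least 2 samples each).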
image = np.ma.array(image, mask=np.isnan(image))
work1 = np.array(np.sort(image), dtype=np.float32)
for i in range(len(work1)):
if 'nan' not in str(work1[i]).lower():
work2.append(work1[i])
work2 = np.array(work2, dtype=np.float32)
if int(float(len(work2)) / 10 + 0.5) > nstat:
nstat = int(float(len(work2)) / 10 + 0.5)
zmin = np.median(work2[:nstat])
zmax = np.median(work2[-nstat:])
if imscale == 'logarithmic':
if zmin < 0.0:
zmin = 100.0
if np.any(image <= 0):
image = np.log10(image + abs(image.min()) + 1)
else:
image = np.log10(image)
zmin = math.log10(zmin)
zmax = math.log10(zmax)
if imscale == 'squareroot':
if zmin < 0.0:
zmin = 100.0
if np.any(image < 0):
image = np.sqrt(image + abs(image.min()))
else:
image = np.sqrt(image)
zmin = math.sqrt(zmin)
zmax = math.sqrt(zmax)
return image, zmin, zmax
def intScale2D(image, imscale):
"""intensity scale limits of 2d array"""
nstat = 2
work1 = np.array([], dtype=np.float32)
(ysiz, xsiz) = np.shape(image)
for i in range(ysiz):
for j in range(xsiz):
if np.isfinite(image[i, j]) and image[i, j] > 0.0:
work1 = np.append(work1, image[i, j])
work2 = np.array(np.sort(work1))
if int(float(len(work2)) / 1000 + 0.5) > nstat:
nstat = int(float(len(work2)) / 1000 + 0.5)
zmin = np.median(work2[:nstat])
zmax = np.median(work2[-nstat:])
if imscale == 'logarithmic':
image = np.log10(image)
zmin = math.log10(zmin)
zmax = math.log10(zmax)
if imscale == 'squareroot':
image = np.sqrt(image)
zmin = math.sqrt(zmin)
zmax = math.sqrt(zmax)
return image, zmin, zmax
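# A border segment is drawn wherever a pixel carrying the requested bit
# neighbors one that does not; the main double loop below checks upper/left
# neighbors and the outer aperture edges are handled as corner cases.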
def borders(maskimg, xdim, ydim, pixcoord1, pixcoord2, bit, lcolor, lstyle, lwidth):
"""plot mask borders in CCD coordinates"""
for i in range(1, ydim):
for j in range(1, xdim):
if (kepstat.bitInBitmap(maskimg[i, j], bit) and not
kepstat.bitInBitmap(maskimg[i - 1, j], bit)):
x = np.array([pixcoord1[j - 1, i], pixcoord1[j, i]]) + 0.5
y = np.array([pixcoord2[j, i], pixcoord2[j , i]]) - 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
if (not kepstat.bitInBitmap(maskimg[i, j], bit) and
kepstat.bitInBitmap(maskimg[i - 1, j], bit)):
x = np.array([pixcoord1[j - 1, i], pixcoord1[j, i]]) + 0.5
y = np.array([pixcoord2[j, i], pixcoord2[j, i]]) - 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
if (kepstat.bitInBitmap(maskimg[i, j], bit) and not
kepstat.bitInBitmap(maskimg[i, j - 1], bit)):
x = np.array([pixcoord1[j, i], pixcoord1[j, i]]) - 0.5
y = np.array([pixcoord2[j, i - 1], pixcoord2[j, i]]) + 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
if (not kepstat.bitInBitmap(maskimg[i, j], bit) and
kepstat.bitInBitmap(maskimg[i, j - 1], bit)):
x = np.array([pixcoord1[j, i], pixcoord1[j, i]]) - 0.5
y = np.array([pixcoord2[j, i - 1],pixcoord2[j, i]]) + 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
# corner cases
for j in range(ydim):
try:
if (kepstat.bitInBitmap(maskimg[j, 0], bit) and not
kepstat.bitInBitmap(maskimg[j - 1,0], bit)):
x = np.array([pixcoord1[0, j], pixcoord1[1, j]]) - 0.5
y = np.array([pixcoord2[0, j], pixcoord2[0, j]]) - 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
except:
pass
try:
if (not kepstat.bitInBitmap(maskimg[j + 1, 0], bit) and
kepstat.bitInBitmap(maskimg[j,0],bit)):
x = np.array([pixcoord1[0, j], pixcoord1[1, j]]) - 0.5
y = np.array([pixcoord2[0, j], pixcoord2[0, j]]) + 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
except:
pass
if kepstat.bitInBitmap(maskimg[j, 0], bit):
x = np.array([pixcoord1[0, j], pixcoord1[0, j]]) - 0.5
try:
y = np.array([pixcoord2[0, j], pixcoord2[0, j + 1]]) - 0.5
except:
y = np.array([pixcoord2[0, j - 1], pixcoord2[0, j]]) + 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
if kepstat.bitInBitmap(maskimg[j, xdim - 1], bit):
x = np.array([pixcoord1[xdim - 1, j], pixcoord1[xdim - 1, j]]) + 0.5
try:
y = (np.array([pixcoord2[xdim - 1, j],
pixcoord2[xdim - 1, j + 1]]) - 0.5)
except:
y = (np.array([pixcoord2[xdim - 1, j - 1],
pixcoord2[xdim - 1, j]]) + 0.5)
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
for i in range(xdim):
try:
if (kepstat.bitInBitmap(maskimg[0, i], bit) and not
kepstat.bitInBitmap(maskimg[0, i - 1], bit)):
x = np.array([pixcoord1[i, 0], pixcoord1[i, 0]]) - 0.5
y = np.array([pixcoord2[i, 0], pixcoord2[i, 1]]) - 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle,
linewidth=lwidth)
except:
pass
try:
if (not kepstat.bitInBitmap(maskimg[0, i + 1], bit) and
kepstat.bitInBitmap(maskimg[0, i], bit)):
x = np.array([pixcoord1[i, 0], pixcoord1[i, 0]]) + 0.5
y = np.array([pixcoord2[i, 0], pixcoord2[i, 1]]) - 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
except:
pass
if kepstat.bitInBitmap(maskimg[0, i], bit):
try:
x = np.array([pixcoord1[i, 0], pixcoord1[i + 1, 0]]) - 0.5
except:
x = np.array([pixcoord1[i - 1, 0], pixcoord1[i, 0]]) + 0.5
y = np.array([pixcoord2[i, 0], pixcoord2[i, 0]]) - 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
if kepstat.bitInBitmap(maskimg[ydim - 1, i], bit):
try:
x = (np.array([pixcoord1[i, ydim - 1],
pixcoord1[i + 1, ydim - 1]]) - 0.5)
except:
x = (np.array([pixcoord1[i - 1, ydim - 1],
pixcoord1[i, ydim - 1]]) - 0.5)
y = np.array([pixcoord2[i, ydim - 1], pixcoord2[i, ydim - 1]]) + 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
if kepstat.bitInBitmap(maskimg[ydim - 1, xdim - 1], bit):
x = (np.array([pixcoord1[xdim - 2, ydim - 1],
pixcoord1[xdim - 1, ydim - 1]]) + 0.5)
y = (np.array([pixcoord2[xdim - 1, ydim - 1],
pixcoord2[xdim - 1, ydim - 1]]) + 0.5)
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
if kepstat.bitInBitmap(maskimg[0, xdim - 1], bit):
x = np.array([pixcoord1[xdim - 1, 0], pixcoord1[xdim - 1, 0]]) + 0.5
y = np.array([pixcoord2[xdim - 1, 0], pixcoord2[xdim - 1, 1]]) - 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
return
def PrfBorders(maskimg, xdim, ydim, pixcoord1, pixcoord2, bit, lcolor, lstyle, lwidth):
"""plot mask borders in CCD coordinates"""
for i in range(1, ydim):
for j in range(1, xdim):
if (kepstat.bitInBitmap(maskimg[i, j], bit) and not
kepstat.bitInBitmap(maskimg[i - 1, j], bit)):
x = np.array([pixcoord1[j - 1, i], pixcoord1[j, i]]) + 0.5
y = np.array([pixcoord2[j, i], pixcoord2[j, i]]) - 0.5
plt.plot(x*50, y*50, color=lcolor, linestyle=lstyle,
linewidth=lwidth)
if (not kepstat.bitInBitmap(maskimg[i, j], bit) and
kepstat.bitInBitmap(maskimg[i - 1, j], bit)):
x = np.array([pixcoord1[j - 1, i], pixcoord1[j, i]]) + 0.5
y = np.array([pixcoord2[j , i], pixcoord2[j, i]]) - 0.5
plt.plot(x*50, y*50, color=lcolor, linestyle=lstyle,
linewidth=lwidth)
if (kepstat.bitInBitmap(maskimg[i, j], bit) and not
kepstat.bitInBitmap(maskimg[i, j - 1], bit)):
x = np.array([pixcoord1[j, i], pixcoord1[j, i]]) - 0.5
y = np.array([pixcoord2[j, i - 1], pixcoord2[j, i]]) + 0.5
plt.plot(x*50, y*50, color=lcolor, linestyle=lstyle,
linewidth=lwidth)
if (not kepstat.bitInBitmap(maskimg[i, j], bit) and
kepstat.bitInBitmap(maskimg[i, j - 1], bit)):
x = np.array([pixcoord1[j, i], pixcoord1[j, i]]) - 0.5
y = np.array([pixcoord2[j, i - 1], pixcoord2[j, i]]) + 0.5
plt.plot(x*50, y*50, color=lcolor, linestyle=lstyle,
linewidth=lwidth)
# corner cases
for j in range(ydim):
try:
if (kepstat.bitInBitmap(maskimg[j, 0], bit) and not
kepstat.bitInBitmap(maskimg[j - 1, 0], bit)):
x = np.array([pixcoord1[0, j], pixcoord1[1, j]]) - 0.5
y = np.array([pixcoord2[0, j], pixcoord2[0, j]]) - 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
except:
pass
try:
if (not kepstat.bitInBitmap(maskimg[j + 1, 0], bit) and
kepstat.bitInBitmap(maskimg[j, 0], bit)):
x = np.array([pixcoord1[0, j], pixcoord1[1, j]]) - 0.5
y = np.array([pixcoord2[0, j], pixcoord2[0, j]]) + 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
except:
pass
if kepstat.bitInBitmap(maskimg[j, 0], bit):
x = np.array([pixcoord1[0,j],pixcoord1[0,j]]) - 0.5
try:
y = np.array([pixcoord2[0, j], pixcoord2[0, j + 1]]) - 0.5
except:
y = np.array([pixcoord2[0, j - 1], pixcoord2[0, j]]) + 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
if kepstat.bitInBitmap(maskimg[j, xdim - 1], bit):
x = np.array([pixcoord1[xdim - 1, j], pixcoord1[xdim - 1, j]]) + 0.5
try:
y = (np.array([pixcoord2[xdim - 1, j],
pixcoord2[xdim - 1, j + 1]]) - 0.5)
except:
y = (np.array([pixcoord2[xdim - 1, j - 1],
pixcoord2[xdim - 1, j]]) + 0.5)
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
for i in range(xdim):
try:
if (kepstat.bitInBitmap(maskimg[0, i], bit) and not
kepstat.bitInBitmap(maskimg[0, i - 1], bit)):
x = np.array([pixcoord1[i, 0], pixcoord1[i, 0]]) - 0.5
y = np.array([pixcoord2[i, 0], pixcoord2[i, 1]]) - 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
except:
pass
try:
if (not kepstat.bitInBitmap(maskimg[0, i + 1], bit) and
kepstat.bitInBitmap(maskimg[0, i], bit)):
x = np.array([pixcoord1[i, 0], pixcoord1[i, 0]]) + 0.5
y = np.array([pixcoord2[i, 0], pixcoord2[i, 1]]) - 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle,
linewidth=lwidth)
except:
pass
if kepstat.bitInBitmap(maskimg[0, i], bit):
try:
x = np.array([pixcoord1[i, 0], pixcoord1[i + 1, 0]]) - 0.5
except:
x = np.array([pixcoord1[i - 1, 0], pixcoord1[i, 0]]) + 0.5
y = np.array([pixcoord2[i,0],pixcoord2[i,0]]) - 0.5
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
if kepstat.bitInBitmap(maskimg[ydim - 1, i], bit):
try:
x = (np.array([pixcoord1[i, ydim - 1],
pixcoord1[i + 1, ydim-1]]) - 0.5)
except:
x = (np.array([pixcoord1[i - 1, ydim - 1],
pixcoord1[i, ydim - 1]]) - 0.5)
y = (np.array([pixcoord2[i, ydim - 1],
pixcoord2[i, ydim - 1]]) + 0.5)
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
if kepstat.bitInBitmap(maskimg[ydim - 1, xdim -1], bit):
x = (np.array([pixcoord1[xdim - 2, ydim - 1],
pixcoord1[xdim - 1, ydim - 1]]) + 0.5)
y = (np.array([pixcoord2[xdim - 1, ydim - 1],
pixcoord2[xdim - 1, ydim - 1]]) + 0.5)
plt.plot(x, y, color=lcolor, linestyle=lstyle, linewidth=lwidth)
if kepstat.bitInBitmap(maskimg[0, xdim - 1], bit):
x = np.array([pixcoord1[xdim - 1, 0], pixcoord1[xdim - 1, 0]]) + 0.5
        y = np.array([pixcoord2[xdim - 1, 0], pixcoord2[xdim - 1, 1]]) - 0.5
from tokenize import String
import numpy as np
import os
import sys
import pickle
import argparse
from tqdm import tqdm
sys.path.append('../')
from gan_models.dcgan.model import gen_random
#sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'tools'))
from attack_models.tools.utils import *
from sklearn.neighbors import NearestNeighbors
import tensorflow as tf
### Hyperparameters
K = 5
BATCH_SIZE = 10
flags = tf.compat.v1.flags
flags.DEFINE_string("z_dist", "normal01", "'normal01' or 'uniform_unsigned' or uniform_signed")
FLAGS = flags.FLAGS
#############################################################################################################
# get and save the arguments
#############################################################################################################
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', '-name', type=str, required=True,
help='the name of the current experiment (used to set up the save_dir)')
parser.add_argument('--gan_model_dir', '-gdir', type=str, required=True,
help='directory for the Victim GAN model (save the generated.npz file)')
parser.add_argument('--pos_data_dir', '-posdir', type=str,
help='the directory for the positive (training) query images set')
parser.add_argument('--neg_data_dir', '-negdir', type=str,
help='the directory for the negative (testing) query images set')
parser.add_argument('--data_num', '-dnum', type=int, default=20000,
help='the number of query images to be considered')
parser.add_argument('--resolution', '-resolution', type=int, default=64,
help='generated image resolution')
return parser.parse_args()
def check_args(args):
'''
check and store the arguments as well as set up the save_dir
:param args: arguments
:return:
'''
## load dir
#assert os.path.exists(args.gan_model_dir)
## set up save_dir
save_dir = os.path.join(os.path.dirname(__file__), 'results/fbb', args.exp_name)
check_folder(save_dir)
## store the parameters
with open(os.path.join(save_dir, 'params.txt'), 'w') as f:
for k, v in vars(args).items():
f.writelines(k + ":" + str(v) + "\n")
print(k + ":" + str(v))
pickle.dump(vars(args), open(os.path.join(save_dir, 'params.pkl'), 'wb'), protocol=2)
return args, save_dir, args.gan_model_dir
#############################################################################################################
# main nearest neighbor search function
#############################################################################################################
def find_knn(nn_obj, query_imgs):
'''
:param nn_obj: Nearest Neighbor object
:param query_imgs: query images
:return:
dist: distance between query samples to its KNNs among generated samples
idx: index of the KNNs
'''
dist = []
idx = []
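    # Query images are processed in batches of BATCH_SIZE and flattened to
    # vectors before being matched against the generated samples in nn_obj.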
for i in tqdm(range(len(query_imgs) // BATCH_SIZE)):
x_batch = query_imgs[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
        x_batch = np.reshape(x_batch, [BATCH_SIZE, -1])
import tempfile
import matplotlib.colors as colors
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
def spy(*args, filename=None, **kwargs):
if filename is None:
return _plot(*args, **kwargs)
_write_png(filename, *args, **kwargs)
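# Illustrative usage (sketch; `A` is a scipy.sparse matrix):
#   spy(A).show()                                  # interactive plot
#   spy(A, border_width=1, filename="out.png")     # write a PNG to disk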
def _plot(A, border_width: int = 0, border_color="0.5", colormap=None):
with tempfile.NamedTemporaryFile() as fp:
_write_png(
fp.name,
A,
border_width=border_width,
border_color=border_color,
colormap=colormap,
)
img = mpimg.imread(fp.name)
plt.imshow(img, origin="upper", interpolation="nearest", cmap="gray")
return plt
def _write_png(filename, A, border_width: int = 0, border_color="0.5", colormap=None):
import png # pypng
iterator = RowIterator(A, border_width, border_color, colormap)
m, n = A.shape
w = png.Writer(
n + 2 * border_width,
m + 2 * border_width,
greyscale=iterator.mode != "rgb",
bitdepth=iterator.bitdepth,
)
with open(filename, "wb") as f:
w.write(f, iterator)
class RowIterator:
def __init__(self, A, border_width, border_color, colormap):
self.A = A.tocsr()
self.border_width = border_width
rgb = np.array(colors.to_rgb(border_color))
border_color_is_bw = np.all(rgb[0] == rgb) and rgb[0] in [0, 1]
border_color_is_gray = np.all(rgb[0] == rgb)
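        # Pick the cheapest PNG mode that can represent the pattern and border:
        # 1-bit binary if no colormap and a black/white border, 8-bit grayscale
        # for a gray border, otherwise 8-bit RGB.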
if colormap is None and (border_width == 0 or border_color_is_bw):
self.mode = "binary"
self.border_color = False
self.bitdepth = 1
self.dtype = bool
elif colormap is None and border_color_is_gray:
self.mode = "grayscale"
self.bitdepth = 8
self.dtype = np.uint8
self.border_color = np.uint8(np.round(rgb[0] * 255))
else:
self.mode = "rgb"
self.border_color = np.round(rgb * 255).astype(np.uint8)
self.dtype = np.uint8
self.bitdepth = 8
if colormap is None:
if self.mode == "binary":
def convert_values(idx, vals):
out = np.ones(self.A.shape[1], dtype=self.dtype)
out[idx] = False
return out
elif self.mode == "grayscale":
def convert_values(idx, vals):
out = np.full(self.A.shape[1], 255, dtype=self.dtype)
out[idx] = 0
return out
else:
assert self.mode == "rgb"
def convert_values(idx, vals):
                    out = np.full((self.A.shape[1], 3), 255, dtype=self.dtype)
import numpy as np
import pickle as pkl
import networkx as nx
from networkx.readwrite import json_graph
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
from sklearn.metrics import f1_score
import sys
import tensorflow as tf
import json
from time import time
import os, copy
flags = tf.app.flags
FLAGS = flags.FLAGS
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
def load_gcn_data(dataset_str):
npz_file = 'data/{}_{}.npz'.format(dataset_str, FLAGS.normalization)
if os.path.exists(npz_file):
start_time = time()
print('Found preprocessed dataset {}, loading...'.format(npz_file))
data = np.load(npz_file)
num_data = data['num_data']
labels = data['labels']
train_data = data['train_data']
val_data = data['val_data']
test_data = data['test_data']
train_adj = sp.csr_matrix((data['train_adj_data'], data['train_adj_indices'], data['train_adj_indptr']), shape=data['train_adj_shape'])
full_adj = sp.csr_matrix((data['full_adj_data'], data['full_adj_indices'], data['full_adj_indptr']), shape=data['full_adj_shape'])
feats = sp.csr_matrix((data['feats_data'], data['feats_indices'], data['feats_indptr']), shape=data['feats_shape'])
train_feats = sp.csr_matrix((data['train_feats_data'], data['train_feats_indices'], data['train_feats_indptr']), shape=data['train_feats_shape'])
test_feats = sp.csr_matrix((data['test_feats_data'], data['test_feats_indices'], data['test_feats_indptr']), shape=data['test_feats_shape'])
print('Finished in {} seconds.'.format(time() - start_time))
else:
"""Load data."""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
if dataset_str != 'nell':
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y)+500)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
else:
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
features = allx.tocsr()
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = ally
idx_test = test_idx_reorder
idx_train = range(len(y))
idx_val = range(len(y), len(y)+969)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
# num_data, (v, coords), feats, labels, train_d, val_d, test_d
num_data = features.shape[0]
def _normalize_adj(adj):
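            # Row-normalize the adjacency matrix: A_hat = D^-1 A (random-walk style).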
rowsum = np.array(adj.sum(1)).flatten()
d_inv = 1.0 / (rowsum+1e-20)
d_mat_inv = sp.diags(d_inv, 0)
adj = d_mat_inv.dot(adj).tocoo()
coords = np.array((adj.row, adj.col)).astype(np.int32)
return adj.data.astype(np.float32), coords
def gcn_normalize_adj(adj):
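            # Symmetric GCN normalization: D^-1/2 (A + I) D^-1/2 (Kipf & Welling, 2017).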
adj = adj + sp.eye(adj.shape[0])
rowsum = np.array(adj.sum(1)) + 1e-20
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt, 0)
adj = adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)
adj = adj.tocoo()
coords = np.array((adj.row, adj.col)).astype(np.int32)
return adj.data.astype(np.float32), coords
# Normalize features
rowsum = np.array(features.sum(1)) + 1e-9
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv, 0)
features = r_mat_inv.dot(features)
if FLAGS.normalization == 'gcn':
full_v, full_coords = gcn_normalize_adj(adj)
else:
full_v, full_coords = _normalize_adj(adj)
full_v = full_v.astype(np.float32)
full_coords = full_coords.astype(np.int32)
train_v, train_coords = full_v, full_coords
labels = (y_train + y_val + y_test).astype(np.float32)
train_data = np.nonzero(train_mask)[0].astype(np.int32)
val_data = np.nonzero(val_mask)[0].astype(np.int32)
test_data = np.nonzero(test_mask)[0].astype(np.int32)
feats = (features.data, features.indices, features.indptr, features.shape)
def _get_adj(data, coords):
adj = sp.csr_matrix((data, (coords[0,:], coords[1,:])),
shape=(num_data, num_data))
return adj
train_adj = _get_adj(train_v, train_coords)
full_adj = _get_adj(full_v, full_coords)
feats = sp.csr_matrix((feats[0], feats[1], feats[2]),
shape=feats[-1], dtype=np.float32)
train_feats = train_adj.dot(feats)
test_feats = full_adj.dot(feats)
with open(npz_file, 'wb') as fwrite:
np.savez(fwrite, num_data=num_data,
train_adj_data=train_adj.data, train_adj_indices=train_adj.indices, train_adj_indptr=train_adj.indptr, train_adj_shape=train_adj.shape,
full_adj_data=full_adj.data, full_adj_indices=full_adj.indices, full_adj_indptr=full_adj.indptr, full_adj_shape=full_adj.shape,
feats_data=feats.data, feats_indices=feats.indices, feats_indptr=feats.indptr, feats_shape=feats.shape,
train_feats_data=train_feats.data, train_feats_indices=train_feats.indices, train_feats_indptr=train_feats.indptr, train_feats_shape=train_feats.shape,
test_feats_data=test_feats.data, test_feats_indices=test_feats.indices, test_feats_indptr=test_feats.indptr, test_feats_shape=test_feats.shape,
labels=labels,
train_data=train_data, val_data=val_data,
test_data=test_data)
return num_data, train_adj, full_adj, feats, train_feats, test_feats, labels, train_data, val_data, test_data
def load_graphsage_data(prefix, normalize=True):
version_info = map(int, nx.__version__.split('.'))
major = version_info[0]
minor = version_info[1]
assert (major <= 1) and (minor <= 11), "networkx major version must be <= 1.11 in order to load graphsage data"
# Save normalized version
if FLAGS.max_degree==-1:
npz_file = prefix + '.npz'
else:
npz_file = '{}_deg{}.npz'.format(prefix, FLAGS.max_degree)
if os.path.exists(npz_file):
start_time = time()
print('Found preprocessed dataset {}, loading...'.format(npz_file))
data = np.load(npz_file)
num_data = data['num_data']
feats = data['feats']
train_feats = data['train_feats']
test_feats = data['test_feats']
labels = data['labels']
train_data = data['train_data']
val_data = data['val_data']
test_data = data['test_data']
train_adj = sp.csr_matrix((data['train_adj_data'], data['train_adj_indices'], data['train_adj_indptr']), shape=data['train_adj_shape'])
full_adj = sp.csr_matrix((data['full_adj_data'], data['full_adj_indices'], data['full_adj_indptr']), shape=data['full_adj_shape'])
print('Finished in {} seconds.'.format(time() - start_time))
else:
print('Loading data...')
start_time = time()
G_data = json.load(open(prefix + "-G.json"))
G = json_graph.node_link_graph(G_data)
feats = np.load(prefix + "-feats.npy").astype(np.float32)
id_map = json.load(open(prefix + "-id_map.json"))
if id_map.keys()[0].isdigit():
conversion = lambda n: int(n)
else:
conversion = lambda n: n
id_map = {conversion(k):int(v) for k,v in id_map.iteritems()}
walks = []
class_map = json.load(open(prefix + "-class_map.json"))
if isinstance(class_map.values()[0], list):
lab_conversion = lambda n : n
else:
lab_conversion = lambda n : int(n)
class_map = {conversion(k): lab_conversion(v) for k,v in class_map.iteritems()}
## Remove all nodes that do not have val/test annotations
## (necessary because of networkx weirdness with the Reddit data)
broken_count = 0
to_remove = []
for node in G.nodes():
if not id_map.has_key(node):
#if not G.node[node].has_key('val') or not G.node[node].has_key('test'):
to_remove.append(node)
broken_count += 1
for node in to_remove:
G.remove_node(node)
print("Removed {:d} nodes that lacked proper annotations due to networkx versioning issues".format(broken_count))
# Construct adjacency matrix
print("Loaded data ({} seconds).. now preprocessing..".format(time()-start_time))
start_time = time()
edges = []
for edge in G.edges():
if id_map.has_key(edge[0]) and id_map.has_key(edge[1]):
edges.append((id_map[edge[0]], id_map[edge[1]]))
print('{} edges'.format(len(edges)))
num_data = len(id_map)
if FLAGS.max_degree != -1:
print('Subsampling edges...')
edges = subsample_edges(edges, num_data, FLAGS.max_degree)
val_data = np.array([id_map[n] for n in G.nodes()
if G.node[n]['val']], dtype=np.int32)
test_data = np.array([id_map[n] for n in G.nodes()
if G.node[n]['test']], dtype=np.int32)
is_train = np.ones((num_data), dtype=np.bool)
is_train[val_data] = False
is_train[test_data] = False
train_data = np.array([n for n in range(num_data) if is_train[n]], dtype=np.int32)
train_edges = [(e[0], e[1]) for e in edges if is_train[e[0]] and is_train[e[1]]]
edges = np.array(edges, dtype=np.int32)
train_edges = np.array(train_edges, dtype=np.int32)
# Process labels
if isinstance(class_map.values()[0], list):
num_classes = len(class_map.values()[0])
labels = np.zeros((num_data, num_classes), dtype=np.float32)
for k in class_map.keys():
labels[id_map[k], :] = np.array(class_map[k])
else:
num_classes = len(set(class_map.values()))
labels = np.zeros((num_data, num_classes), dtype=np.float32)
for k in class_map.keys():
labels[id_map[k], class_map[k]] = 1
if normalize:
from sklearn.preprocessing import StandardScaler
train_ids = np.array([id_map[n] for n in G.nodes()
if not G.node[n]['val'] and not G.node[n]['test']])
train_feats = feats[train_ids]
scaler = StandardScaler()
scaler.fit(train_feats)
feats = scaler.transform(feats)
def _normalize_adj(edges):
adj = sp.csr_matrix((np.ones((edges.shape[0]), dtype=np.float32),
(edges[:,0], edges[:,1])), shape=(num_data, num_data))
adj += adj.transpose()
rowsum = np.array(adj.sum(1)).flatten()
d_inv = 1.0 / (rowsum+1e-20)
d_mat_inv = sp.diags(d_inv, 0)
adj = d_mat_inv.dot(adj).tocoo()
coords = np.array((adj.row, adj.col)).astype(np.int32)
return adj.data, coords
train_v, train_coords = _normalize_adj(train_edges)
full_v, full_coords = _normalize_adj(edges)
def _get_adj(data, coords):
adj = sp.csr_matrix((data, (coords[0,:], coords[1,:])),
shape=(num_data, num_data))
return adj
train_adj = _get_adj(train_v, train_coords)
full_adj = _get_adj(full_v, full_coords)
train_feats = train_adj.dot(feats)
test_feats = full_adj.dot(feats)
print("Done. {} seconds.".format(time()-start_time))
with open(npz_file, 'wb') as fwrite:
print('Saving {} edges'.format(full_adj.nnz))
np.savez(fwrite, num_data=num_data,
train_adj_data=train_adj.data, train_adj_indices=train_adj.indices, train_adj_indptr=train_adj.indptr, train_adj_shape=train_adj.shape,
full_adj_data=full_adj.data, full_adj_indices=full_adj.indices, full_adj_indptr=full_adj.indptr, full_adj_shape=full_adj.shape,
feats=feats, train_feats=train_feats, test_feats=test_feats,
labels=labels,
train_data=train_data, val_data=val_data,
test_data=test_data)
return num_data, train_adj, full_adj, feats, train_feats, test_feats, labels, train_data, val_data, test_data
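# Added illustration (not part of the original loader): the _normalize_adj helper
# above builds a row-normalized adjacency D^{-1} A, so every row of the result
# sums to ~1. A tiny self-contained check on a 3-node cycle, assuming numpy and
# scipy.sparse are imported as np / sp as elsewhere in this file:
def _demo_row_normalize():
    toy_edges = np.array([[0, 1], [1, 2], [2, 0]], dtype=np.int32)
    adj = sp.csr_matrix((np.ones(toy_edges.shape[0], dtype=np.float32),
                         (toy_edges[:, 0], toy_edges[:, 1])), shape=(3, 3))
    adj = adj + adj.transpose()            # symmetrize, as in the loader above
    rowsum = np.array(adj.sum(1)).flatten()
    d_mat_inv = sp.diags(1.0 / (rowsum + 1e-20), 0)
    adj_norm = d_mat_inv.dot(adj)
    assert np.allclose(np.array(adj_norm.sum(1)).flatten(), 1.0)
    return adj_norm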
def load_youtube_data(prefix, ptrain):
npz_file = 'data/{}_{}.npz'.format(prefix, ptrain)
if os.path.exists(npz_file):
start_time = time()
print('Found preprocessed dataset {}, loading...'.format(npz_file))
data = np.load(npz_file)
num_data = data['num_data']
labels = data['labels']
train_data = data['train_data']
val_data = data['val_data']
test_data = data['test_data']
adj = sp.csr_matrix((data['adj_data'], data['adj_indices'], data['adj_indptr']),
shape=data['adj_shape'])
feats = sp.csr_matrix((data['feats_data'], data['feats_indices'], data['feats_indptr']),
shape=data['feats_shape'])
feats1 = sp.csr_matrix((data['feats1_data'], data['feats1_indices'], data['feats1_indptr']),
shape=data['feats1_shape'])
print('Finished in {} seconds.'.format(time() - start_time))
else:
start_time = time()
# read edges
with open('data/'+prefix+'/edges.csv') as f:
links = [link.split(',') for link in f.readlines()]
links = [(int(link[0])-1, int(link[1])-1) for link in links]
links = np.array(links).astype(np.int32)
num_data = np.max(links)+1
adj = sp.csr_matrix((np.ones(links.shape[0], dtype=np.float32),
(links[:,0], links[:,1])),
shape=(num_data, num_data))
adj = adj + adj.transpose()
def _normalize_adj(adj):
rowsum = np.array(adj.sum(1)).flatten()
d_inv = 1.0 / (rowsum+1e-20)
d_mat_inv = sp.diags(d_inv, 0)
adj = d_mat_inv.dot(adj)
return adj
adj = _normalize_adj(adj)
feats = sp.eye(num_data, dtype=np.float32).tocsr()
feats1 = adj.dot(feats)
num_classes = 47
labels = np.zeros((num_data, num_classes), dtype=np.float32)
with open('data/'+prefix+'/group-edges.csv') as f:
for line in f.readlines():
line = line.split(',')
labels[int(line[0])-1, int(line[1])-1] = 1
data = np.nonzero(labels.sum(1))[0].astype(np.int32)
np.random.shuffle(data)
n_train = int(len(data)*ptrain)
train_data = np.copy(data[:n_train])
val_data = np.copy(data[n_train:])
test_data = np.copy(data[n_train:])
num_data, adj, feats, feats1, labels, train_data, val_data, test_data = \
data_augmentation(num_data, adj, adj, feats, labels,
train_data, val_data, test_data)
print("Done. {} seconds.".format(time()-start_time))
with open(npz_file, 'wb') as fwrite:
np.savez(fwrite, num_data=num_data,
adj_data=adj.data, adj_indices=adj.indices,
adj_indptr=adj.indptr, adj_shape=adj.shape,
feats_data=feats.data, feats_indices=feats.indices,
feats_indptr=feats.indptr, feats_shape=feats.shape,
feats1_data=feats1.data, feats1_indices=feats1.indices,
feats1_indptr=feats1.indptr, feats1_shape=feats1.shape,
labels=labels,
train_data=train_data, val_data=val_data,
test_data=test_data)
return num_data, adj, feats, feats1, labels, train_data, val_data, test_data
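# Usage note (added for clarity; not in the original module): load_youtube_data
# expects the raw graph under data/<prefix>/ as two CSV files with 1-indexed ids,
#   data/<prefix>/edges.csv        lines of the form "src,dst"
#   data/<prefix>/group-edges.csv  lines of the form "node,group"
# and caches the preprocessed result as data/<prefix>_<ptrain>.npz. A typical
# call (the prefix below is only an assumption for illustration) would be:
#   num_data, adj, feats, feats1, labels, train, val, test = \
#       load_youtube_data('youtube', ptrain=0.9)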
def data_augmentation(num_data, train_adj, full_adj, feats, labels, train_data, val_data, test_data, n_rep=1):
if isinstance(feats, np.ndarray):
feats = np.tile(feats, [n_rep+1, 1])
else:
feats = sp.vstack([feats] * (n_rep+1))
labels = np.tile(labels, [n_rep+1, 1])
train_adj = train_adj.tocoo()
full_adj = full_adj.tocoo()
i = []
j = []
data = []
def add_adj(adj, t):
i.append(adj.row + t*num_data)
j.append(adj.col + t*num_data)
data.append(adj.data)
for t in range(n_rep):
add_adj(train_adj, t)
add_adj(full_adj, n_rep)
adj = sp.csr_matrix((np.concatenate(data), (np.concatenate(i), np.concatenate(j))),
shape=np.array(train_adj.shape)*(n_rep+1), dtype=train_adj.dtype)
new_train = []
for t in range(n_rep):
new_train.append(train_data + t*num_data)
train_data = np.concatenate(new_train)
val_data += n_rep * num_data
test_data += n_rep * num_data
return num_data*(n_rep+1), adj, feats, adj.dot(feats), labels, train_data, val_data, test_data
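# Added sketch: data_augmentation places n_rep copies of train_adj plus one copy
# of full_adj as blocks on the diagonal of a larger matrix, so node i of replica
# t becomes node i + t*num_data, and features/labels are tiled accordingly. A
# minimal shape check (assumes np / sp are the module-level imports used above):
def _demo_data_augmentation():
    n = 3
    eye = sp.eye(n, dtype=np.float32).tocsr()
    feats = np.ones((n, 2), dtype=np.float32)
    labels = np.zeros((n, 2), dtype=np.float32)
    idx = np.arange(n, dtype=np.int32)
    out = data_augmentation(n, eye, eye, feats, labels,
                            idx, idx.copy(), idx.copy(), n_rep=1)
    new_num_data, new_adj = out[0], out[1]
    assert new_num_data == 2 * n and new_adj.shape == (2 * n, 2 * n)
    return out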
def np_dropout(feats, keep_prob):
mask = np.random.rand(feats.shape[0], feats.shape[1]) < keep_prob
return feats * mask.astype(np.float32) * (1.0 / keep_prob)
def np_sparse_dropout(feats, keep_prob):
feats = feats.tocoo()
    mask = np.random.rand(feats.data.shape[0]) < keep_prob
    feats.data = feats.data * mask.astype(np.float32) * (1.0 / keep_prob)  # completed to mirror np_dropout above
    return feats.tocsr()
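# Quick usage sketch (added; assumes np_sparse_dropout returns the rescaled CSR
# matrix as completed above, mirroring the dense variant): surviving entries are
# scaled by 1/keep_prob so the expected value of the features is preserved.
def _demo_np_dropout():
    dense = np.ones((4, 4), dtype=np.float32)
    sparse = sp.random(4, 4, density=0.5, format='csr', dtype=np.float32)
    dropped_dense = np_dropout(dense, keep_prob=0.5)       # entries are 0.0 or 2.0
    dropped_sparse = np_sparse_dropout(sparse, keep_prob=0.5)
    return dropped_dense, dropped_sparse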
import unittest
import os
import numpy as np
from astropy import constants
import lal
import matplotlib.pyplot as plt
import bilby
from bilby.core import utils
class TestConstants(unittest.TestCase):
def test_speed_of_light(self):
self.assertEqual(utils.speed_of_light, lal.C_SI)
self.assertLess(
abs(utils.speed_of_light - constants.c.value) / utils.speed_of_light, 1e-16
)
def test_parsec(self):
self.assertEqual(utils.parsec, lal.PC_SI)
self.assertLess(abs(utils.parsec - constants.pc.value) / utils.parsec, 1e-11)
def test_solar_mass(self):
self.assertEqual(utils.solar_mass, lal.MSUN_SI)
self.assertLess(
abs(utils.solar_mass - constants.M_sun.value) / utils.solar_mass, 1e-4
)
def test_radius_of_earth(self):
self.assertEqual(bilby.core.utils.radius_of_earth, lal.REARTH_SI)
self.assertLess(
abs(utils.radius_of_earth - constants.R_earth.value)
/ utils.radius_of_earth,
1e-5,
)
def test_gravitational_constant(self):
self.assertEqual(bilby.core.utils.gravitational_constant, lal.G_SI)
class TestFFT(unittest.TestCase):
def setUp(self):
self.sampling_frequency = 10
def tearDown(self):
del self.sampling_frequency
def test_nfft_sine_function(self):
injected_frequency = 2.7324
duration = 100
times = utils.create_time_series(self.sampling_frequency, duration)
time_domain_strain = np.sin(2 * np.pi * times * injected_frequency + 0.4)
frequency_domain_strain, frequencies = bilby.core.utils.nfft(
time_domain_strain, self.sampling_frequency
)
frequency_at_peak = frequencies[np.argmax(np.abs(frequency_domain_strain))]
self.assertAlmostEqual(injected_frequency, frequency_at_peak, places=1)
def test_nfft_infft(self):
time_domain_strain = np.random.normal(0, 1, 10)
frequency_domain_strain, _ = bilby.core.utils.nfft(
time_domain_strain, self.sampling_frequency
)
new_time_domain_strain = bilby.core.utils.infft(
frequency_domain_strain, self.sampling_frequency
)
self.assertTrue(np.allclose(time_domain_strain, new_time_domain_strain))
class TestInferParameters(unittest.TestCase):
def setUp(self):
def source_function(freqs, a, b, *args, **kwargs):
return None
class TestClass:
def test_method(self, a, b, *args, **kwargs):
pass
class TestClass2:
def test_method(self, freqs, a, b, *args, **kwargs):
pass
self.source1 = source_function
test_obj = TestClass()
self.source2 = test_obj.test_method
test_obj2 = TestClass2()
self.source3 = test_obj2.test_method
def tearDown(self):
del self.source1
del self.source2
def test_args_kwargs_handling(self):
expected = ["a", "b"]
actual = utils.infer_parameters_from_function(self.source1)
self.assertListEqual(expected, actual)
def test_self_handling(self):
expected = ["a", "b"]
actual = utils.infer_args_from_method(self.source2)
self.assertListEqual(expected, actual)
def test_self_handling_method_as_function(self):
expected = ["a", "b"]
actual = utils.infer_parameters_from_function(self.source3)
self.assertListEqual(expected, actual)
class TestTimeAndFrequencyArrays(unittest.TestCase):
def setUp(self):
self.start_time = 1.3
self.sampling_frequency = 5
self.duration = 1.6
self.frequency_array = utils.create_frequency_series(
sampling_frequency=self.sampling_frequency, duration=self.duration
)
self.time_array = utils.create_time_series(
sampling_frequency=self.sampling_frequency,
duration=self.duration,
starting_time=self.start_time,
)
def tearDown(self):
del self.start_time
del self.sampling_frequency
del self.duration
del self.frequency_array
del self.time_array
def test_create_time_array(self):
expected_time_array = np.array([1.3, 1.5, 1.7, 1.9, 2.1, 2.3, 2.5, 2.7])
time_array = utils.create_time_series(
sampling_frequency=self.sampling_frequency,
duration=self.duration,
starting_time=self.start_time,
)
self.assertTrue(np.allclose(expected_time_array, time_array))
def test_create_frequency_array(self):
expected_frequency_array = np.array([0.0, 0.625, 1.25, 1.875, 2.5])
frequency_array = utils.create_frequency_series(
sampling_frequency=self.sampling_frequency, duration=self.duration
)
self.assertTrue(np.allclose(expected_frequency_array, frequency_array))
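    # Added arithmetic note on the expected arrays above (assuming bilby's usual
    # rfft conventions): with duration = 1.6 s and sampling_frequency = 5 Hz the
    # frequency resolution is df = 1/duration = 0.625 Hz and the Nyquist frequency
    # is fs/2 = 2.5 Hz, giving [0, 0.625, 1.25, 1.875, 2.5]; the time array uses
    # dt = 1/fs = 0.2 s with duration*fs = 8 samples starting at 1.3 s.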
def test_get_sampling_frequency_from_time_array(self):
(
new_sampling_freq,
_,
) = utils.get_sampling_frequency_and_duration_from_time_array(self.time_array)
self.assertEqual(self.sampling_frequency, new_sampling_freq)
def test_get_sampling_frequency_from_time_array_unequally_sampled(self):
self.time_array[-1] += 0.0001
with self.assertRaises(ValueError):
_, _ = utils.get_sampling_frequency_and_duration_from_time_array(
self.time_array
)
def test_get_duration_from_time_array(self):
_, new_duration = utils.get_sampling_frequency_and_duration_from_time_array(
self.time_array
)
self.assertEqual(self.duration, new_duration)
def test_get_start_time_from_time_array(self):
new_start_time = self.time_array[0]
self.assertEqual(self.start_time, new_start_time)
def test_get_sampling_frequency_from_frequency_array(self):
(
new_sampling_freq,
_,
) = utils.get_sampling_frequency_and_duration_from_frequency_array(
self.frequency_array
)
self.assertEqual(self.sampling_frequency, new_sampling_freq)
def test_get_sampling_frequency_from_frequency_array_unequally_sampled(self):
self.frequency_array[-1] += 0.0001
with self.assertRaises(ValueError):
_, _ = utils.get_sampling_frequency_and_duration_from_frequency_array(
self.frequency_array
)
def test_get_duration_from_frequency_array(self):
(
_,
new_duration,
) = utils.get_sampling_frequency_and_duration_from_frequency_array(
self.frequency_array
)
self.assertEqual(self.duration, new_duration)
def test_consistency_time_array_to_time_array(self):
(
new_sampling_frequency,
new_duration,
) = utils.get_sampling_frequency_and_duration_from_time_array(self.time_array)
new_start_time = self.time_array[0]
new_time_array = utils.create_time_series(
sampling_frequency=new_sampling_frequency,
duration=new_duration,
starting_time=new_start_time,
)
self.assertTrue(np.allclose(self.time_array, new_time_array))
def test_consistency_frequency_array_to_frequency_array(self):
(
new_sampling_frequency,
new_duration,
) = utils.get_sampling_frequency_and_duration_from_frequency_array(
self.frequency_array
)
new_frequency_array = utils.create_frequency_series(
sampling_frequency=new_sampling_frequency, duration=new_duration
)
self.assertTrue(np.allclose(self.frequency_array, new_frequency_array))
def test_illegal_sampling_frequency_and_duration(self):
with self.assertRaises(utils.IllegalDurationAndSamplingFrequencyException):
_ = utils.create_time_series(
sampling_frequency=7.7, duration=1.3, starting_time=0
)
class TestReflect(unittest.TestCase):
def test_in_range(self):
xprime = np.array([0.1, 0.5, 0.9])
x = np.array([0.1, 0.5, 0.9])
self.assertTrue(np.testing.assert_allclose(utils.reflect(xprime), x) is None)
def test_in_one_to_two(self):
xprime = np.array([1.1, 1.5, 1.9])
x = np.array([0.9, 0.5, 0.1])
self.assertTrue(np.testing.assert_allclose(utils.reflect(xprime), x) is None)
def test_in_two_to_three(self):
xprime = np.array([2.1, 2.5, 2.9])
        x = np.array([0.1, 0.5, 0.9])
        self.assertTrue(np.testing.assert_allclose(utils.reflect(xprime), x) is None)
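# Added sketch (not part of the test module): the tests above are consistent with
# utils.reflect acting as a triangle-wave map of period 2 into [0, 1]. A minimal
# standalone version of that behaviour, for illustration only:
def _reflect_sketch(xprime):
    xprime = np.mod(np.asarray(xprime, dtype=float), 2.0)
    return np.where(xprime > 1.0, 2.0 - xprime, xprime)
# _reflect_sketch([0.1, 1.1, 2.1]) -> array([0.1, 0.9, 0.1]), matching the tests.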
import anndata
try:
from anndata.base import Raw
except ImportError:
from anndata import Raw
import batchglm.api as glm
import dask
import logging
import numpy as np
import pandas as pd
import patsy
import scipy.sparse
from typing import Union, List, Dict, Callable, Tuple
from diffxpy import pkg_constants
from .det import DifferentialExpressionTestLRT, DifferentialExpressionTestWald, \
DifferentialExpressionTestTT, DifferentialExpressionTestRank, _DifferentialExpressionTestSingle, \
DifferentialExpressionTestVsRest, _DifferentialExpressionTestMulti, DifferentialExpressionTestByPartition
from .det_cont import DifferentialExpressionTestWaldCont, DifferentialExpressionTestLRTCont
from .det_pair import DifferentialExpressionTestZTestLazy, DifferentialExpressionTestZTest, \
DifferentialExpressionTestPairwiseStandard
from .utils import parse_gene_names, parse_sample_description, parse_size_factors, parse_grouping, \
constraint_system_from_star, preview_coef_names
def _fit(
noise_model,
data,
design_loc,
design_scale,
design_loc_names: list = None,
design_scale_names: list = None,
constraints_loc: np.ndarray = None,
constraints_scale: np.ndarray = None,
init_model=None,
init_a: Union[np.ndarray, str] = "AUTO",
init_b: Union[np.ndarray, str] = "AUTO",
gene_names=None,
size_factors=None,
batch_size: Union[None, int, Tuple[int, int]] = None,
backend: str = "numpy",
training_strategy: Union[str, List[Dict[str, object]], Callable] = "AUTO",
quick_scale: bool = None,
train_args: dict = {},
close_session=True,
dtype="float64"
):
"""
:param noise_model: str, noise model to use in model-based unit_test. Possible options:
- 'nb': default
:param design_loc: Design matrix of location model.
    :param design_scale: Design matrix of scale model.
:param constraints_loc: : Constraints for location model.
Array with constraints in rows and model parameters in columns.
        Each constraint contains non-zero entries for the set of parameters that
has to sum to zero. This constraint is enforced by binding one parameter
to the negative sum of the other parameters, effectively representing that
parameter as a function of the other parameters. This dependent
parameter is indicated by a -1 in this array, the independent parameters
of that constraint (which may be dependent at an earlier constraint)
are indicated by a 1.
:param constraints_scale: : Constraints for scale model.
Array with constraints in rows and model parameters in columns.
        Each constraint contains non-zero entries for the set of parameters that
has to sum to zero. This constraint is enforced by binding one parameter
to the negative sum of the other parameters, effectively representing that
parameter as a function of the other parameters. This dependent
parameter is indicated by a -1 in this array, the independent parameters
of that constraint (which may be dependent at an earlier constraint)
are indicated by a 1.
:param init_model: (optional) If provided, this model will be used to initialize this Estimator.
:param init_a: (Optional) Low-level initial values for a.
Can be:
- str:
* "auto": automatically choose best initialization
* "standard": initialize intercept with observed mean
* "init_model": initialize with another model (see `ìnit_model` parameter)
* "closed_form": try to initialize with closed form
- np.ndarray: direct initialization of 'a'
:param init_b: (Optional) Low-level initial values for b
Can be:
- str:
* "auto": automatically choose best initialization
* "standard": initialize with zeros
* "init_model": initialize with another model (see `ìnit_model` parameter)
* "closed_form": try to initialize with closed form
- np.ndarray: direct initialization of 'b'
:param size_factors: 1D array of transformed library size factors for each cell in the
same order as in data
:param batch_size: Argument controlling the memory load of the fitting procedure. For backends that allow
chunking of operations, this parameter controls the size of the batch / chunk.
- If backend is "tf1" or "tf2": number of observations per batch
- If backend is "numpy": Tuple of (number of observations per chunk, number of genes per chunk)
:param backend: Which linear algebra library to chose. This impact the available noise models and optimizers /
training strategies. Available are:
- "numpy" numpy
- "tf1" tensorflow1.* >= 1.13
- "tf2" tensorflow2.*
:param training_strategy: {str} training strategy to use. Can be:
- str: will use Estimator.TrainingStrategy[training_strategy] to train
:param quick_scale: Depending on the optimizer, `scale` will be fitted faster and maybe less accurate.
Useful in scenarios where fitting the exact `scale` is not absolutely necessary.
:param train_args: Backend-specific parameter estimation (optimizer) settings. This is a dictionary, its
entries depend on the backend. These optimizer settings are set to defaults if not passed in this
dictionary.
- backend=="tf1":
- backend=="tf2":
- optimizer: str
- convergence_criteria: str
- stopping_criteria: str
- learning_rate: float
- batched_model: True
- backend=="numpy":
- nproc: int = 3: number of processes to use in steps of multiprocessing that require scipy.minimize.
            Note that the number of processes used in steps that rely only on linear algebra functions may deviate.
:param dtype: Allows specifying the precision which should be used to fit data.
Should be "float32" for single precision or "float64" for double precision.
:param close_session: If True, will finalize the estimator. Otherwise, return the estimator itself.
"""
# Load estimator for required noise model and backend:
if backend.lower() in ["tf1"]:
if noise_model == "nb" or noise_model == "negative_binomial":
from batchglm.api.models.tf1.glm_nb import Estimator, InputDataGLM
elif noise_model == "norm" or noise_model == "normal":
from batchglm.api.models.tf1.glm_norm import Estimator, InputDataGLM
else:
raise ValueError('noise_model="%s" not recognized.' % noise_model)
if batch_size is None:
batch_size = 128
else:
if not isinstance(batch_size, int):
raise ValueError("batch_size has to be an integer if backend is tf1")
chunk_size_cells = int(1e9)
chunk_size_genes = 128
elif backend.lower() in ["tf2"]:
if noise_model == "nb" or noise_model == "negative_binomial":
from batchglm.api.models.tf2.glm_nb import Estimator, InputDataGLM
else:
raise ValueError('noise_model="%s" not recognized.' % noise_model)
if batch_size is None:
batch_size = 128
else:
if not isinstance(batch_size, int):
raise ValueError("batch_size has to be an integer if backend is tf2")
chunk_size_cells = int(1e9)
chunk_size_genes = 128
elif backend.lower() in ["numpy"]:
if isinstance(training_strategy, str):
if training_strategy.lower() == "auto":
training_strategy = "DEFAULT"
if noise_model == "nb" or noise_model == "negative_binomial":
from batchglm.api.models.numpy.glm_nb import Estimator, InputDataGLM
else:
raise ValueError('noise_model="%s" not recognized.' % noise_model)
# Set default chunk size:
if batch_size is None:
chunk_size_cells = int(1e9)
chunk_size_genes = 128
batch_size = (chunk_size_cells, chunk_size_genes)
else:
if isinstance(batch_size, int) or len(batch_size) != 2:
raise ValueError("batch_size has to be a tuple of length 2 if backend is numpy")
chunk_size_cells = batch_size[0]
chunk_size_genes = batch_size[1]
else:
raise ValueError('backend="%s" not recognized.' % backend)
input_data = InputDataGLM(
data=data,
design_loc=design_loc,
design_scale=design_scale,
design_loc_names=design_loc_names,
design_scale_names=design_scale_names,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
size_factors=size_factors,
feature_names=gene_names,
chunk_size_cells=chunk_size_cells,
chunk_size_genes=chunk_size_genes,
as_dask=backend.lower() in ["numpy"],
cast_dtype=dtype
)
# Assemble variable key word arguments to constructor of Estimator.
constructor_args = {}
if quick_scale is not None:
constructor_args["quick_scale"] = quick_scale
# Backend-specific constructor arguments:
if backend.lower() in ["tf1"]:
constructor_args['provide_optimizers'] = {
"gd": pkg_constants.BATCHGLM_OPTIM_GD,
"adam": pkg_constants.BATCHGLM_OPTIM_ADAM,
"adagrad": pkg_constants.BATCHGLM_OPTIM_ADAGRAD,
"rmsprop": pkg_constants.BATCHGLM_OPTIM_RMSPROP,
"nr": pkg_constants.BATCHGLM_OPTIM_NEWTON,
"nr_tr": pkg_constants.BATCHGLM_OPTIM_NEWTON_TR,
"irls": pkg_constants.BATCHGLM_OPTIM_IRLS,
"irls_gd": pkg_constants.BATCHGLM_OPTIM_IRLS_GD,
"irls_tr": pkg_constants.BATCHGLM_OPTIM_IRLS_TR,
"irls_gd_tr": pkg_constants.BATCHGLM_OPTIM_IRLS_GD_TR
}
constructor_args['provide_batched'] = pkg_constants.BATCHGLM_PROVIDE_BATCHED
constructor_args['provide_fim'] = pkg_constants.BATCHGLM_PROVIDE_FIM
constructor_args['provide_hessian'] = pkg_constants.BATCHGLM_PROVIDE_HESSIAN
constructor_args["batch_size"] = batch_size
    elif backend.lower() in ["tf2"]:
        pass
    elif backend.lower() in ["numpy"]:
        pass
else:
raise ValueError('backend="%s" not recognized.' % backend)
estim = Estimator(
input_data=input_data,
init_a=init_a,
init_b=init_b,
dtype=dtype,
**constructor_args
)
estim.initialize()
# Assemble backend specific key word arguments to training function:
if batch_size is not None:
train_args["batch_size"] = batch_size
if backend.lower() in ["tf1"]:
pass
elif backend.lower() in ["tf2"]:
train_args["autograd"] = pkg_constants.BATCHGLM_AUTOGRAD
train_args["featurewise"] = pkg_constants.BATCHGLM_FEATUREWISE
elif backend.lower() in ["numpy"]:
pass
estim.train_sequence(
training_strategy=training_strategy,
**train_args
)
if close_session:
estim.finalize()
return estim
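# Added illustration of the backend-dependent batch_size convention documented in
# _fit above (values are examples only):
#   backend="tf1" / "tf2":  batch_size is an int, e.g. batch_size=128
#   backend="numpy":        batch_size is a (cells_per_chunk, genes_per_chunk)
#                           tuple, e.g. batch_size=(int(1e6), 64)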
def lrt(
data: Union[anndata.AnnData, Raw, np.ndarray, scipy.sparse.csr_matrix, glm.typing.InputDataBase],
full_formula_loc: str,
reduced_formula_loc: str,
full_formula_scale: str = "~1",
reduced_formula_scale: str = "~1",
as_numeric: Union[List[str], Tuple[str], str] = (),
init_a: Union[np.ndarray, str] = "AUTO",
init_b: Union[np.ndarray, str] = "AUTO",
gene_names: Union[np.ndarray, list] = None,
sample_description: pd.DataFrame = None,
noise_model="nb",
size_factors: Union[np.ndarray, pd.core.series.Series, np.ndarray] = None,
batch_size: Union[None, int, Tuple[int, int]] = None,
backend: str = "numpy",
train_args: dict = {},
training_strategy: Union[str, List[Dict[str, object]], Callable] = "AUTO",
quick_scale: bool = False,
dtype="float64",
**kwargs
):
"""
Perform log-likelihood ratio test for differential expression for each gene.
Note that lrt() does not support constraints in its current form. Please
use wald() for constraints.
:param data: Input data matrix (observations x features) or (cells x genes).
:param full_formula_loc: formula
Full model formula for location parameter model.
:param reduced_formula_loc: formula
        Reduced model formula for location parameter model.
:param full_formula_scale: formula
Full model formula for scale parameter model.
:param reduced_formula_scale: formula
Reduced model formula for scale parameter model.
:param as_numeric:
Which columns of sample_description to treat as numeric and
not as categorical. This yields columns in the design matrix
        which do not correspond to one-hot encoded discrete factors.
This makes sense for number of genes, time, pseudotime or space
for example.
:param init_a: (Optional) Low-level initial values for a.
Can be:
- str:
* "auto": automatically choose best initialization
* "standard": initialize intercept with observed mean
* "init_model": initialize with another model (see `ìnit_model` parameter)
* "closed_form": try to initialize with closed form
- np.ndarray: direct initialization of 'a'
:param init_b: (Optional) Low-level initial values for b
Can be:
- str:
* "auto": automatically choose best initialization
* "standard": initialize with zeros
* "init_model": initialize with another model (see `ìnit_model` parameter)
* "closed_form": try to initialize with closed form
- np.ndarray: direct initialization of 'b'
:param gene_names: optional list/array of gene names which will be used if `data` does not implicitly store these
:param sample_description: optional pandas.DataFrame containing sample annotations
:param noise_model: str, noise model to use in model-based unit_test. Possible options:
- 'nb': default
:param size_factors: 1D array of transformed library size factors for each cell in the
same order as in data or string-type column identifier of size-factor containing
column in sample description.
:param batch_size: Argument controlling the memory load of the fitting procedure. For backends that allow
chunking of operations, this parameter controls the size of the batch / chunk.
- If backend is "tf1" or "tf2": number of observations per batch
- If backend is "numpy": Tuple of (number of observations per chunk, number of genes per chunk)
:param backend: Which linear algebra library to chose. This impact the available noise models and optimizers /
training strategies. Available are:
- "numpy" numpy
- "tf1" tensorflow1.* >= 1.13
- "tf2" tensorflow2.*
:param training_strategy: {str, function, list} training strategy to use. Can be:
- str: will use Estimator.TrainingStrategy[training_strategy] to train
- function: Can be used to implement custom training function will be called as
`training_strategy(estimator)`.
- list of keyword dicts containing method arguments: Will call Estimator.train() once with each dict of
method arguments.
Example:
.. code-block:: python
[
{"learning_rate": 0.5, },
{"learning_rate": 0.05, },
]
This will run training first with learning rate = 0.5 and then with learning rate = 0.05.
:param quick_scale: Depending on the optimizer, `scale` will be fitted faster and maybe less accurate.
Useful in scenarios where fitting the exact `scale` is not absolutely necessary.
:param dtype: Allows specifying the precision which should be used to fit data.
Should be "float32" for single precision or "float64" for double precision.
:param kwargs: [Debugging] Additional arguments will be passed to the _fit method.
"""
# TODO test nestedness
if len(kwargs) != 0:
logging.getLogger("diffxpy").info("additional kwargs: %s", str(kwargs))
if isinstance(as_numeric, str):
as_numeric = [as_numeric]
gene_names = parse_gene_names(data, gene_names)
sample_description = parse_sample_description(data, sample_description)
size_factors = parse_size_factors(
size_factors=size_factors,
data=data,
sample_description=sample_description
)
full_design_loc = glm.data.design_matrix(
sample_description=sample_description,
formula=full_formula_loc,
as_categorical=[False if x in as_numeric else True for x in sample_description.columns.values],
return_type="patsy"
)
reduced_design_loc = glm.data.design_matrix(
sample_description=sample_description,
formula=reduced_formula_loc,
as_categorical=[False if x in as_numeric else True for x in sample_description.columns.values],
return_type="patsy"
)
full_design_scale = glm.data.design_matrix(
sample_description=sample_description,
formula=full_formula_scale,
as_categorical=[False if x in as_numeric else True for x in sample_description.columns.values],
return_type="patsy"
)
reduced_design_scale = glm.data.design_matrix(
sample_description=sample_description,
formula=reduced_formula_scale,
as_categorical=[False if x in as_numeric else True for x in sample_description.columns.values],
return_type="patsy"
)
reduced_model = _fit(
noise_model=noise_model,
data=data,
design_loc=reduced_design_loc,
design_scale=reduced_design_scale,
constraints_loc=None,
constraints_scale=None,
init_a=init_a,
init_b=init_b,
gene_names=gene_names,
size_factors=size_factors,
batch_size=batch_size,
backend=backend,
train_args=train_args,
training_strategy=training_strategy,
quick_scale=quick_scale,
dtype=dtype,
**kwargs
)
full_model = _fit(
noise_model=noise_model,
data=data,
design_loc=full_design_loc,
design_scale=full_design_scale,
constraints_loc=None,
constraints_scale=None,
gene_names=gene_names,
init_a="init_model",
init_b="init_model",
init_model=reduced_model,
size_factors=size_factors,
batch_size=batch_size,
backend=backend,
train_args=train_args,
training_strategy=training_strategy,
quick_scale=quick_scale,
dtype=dtype,
**kwargs
)
de_test = DifferentialExpressionTestLRT(
sample_description=sample_description,
full_design_loc_info=full_design_loc.design_info,
full_estim=full_model,
reduced_design_loc_info=reduced_design_loc.design_info,
reduced_estim=reduced_model,
)
return de_test
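# Minimal usage sketch (added; `adata`, `sample_df` and the column name are
# assumptions for illustration only): run a likelihood-ratio test on a single
# categorical factor via the public diffxpy API.
#
#   import diffxpy.api as de
#   test = de.test.lrt(
#       data=adata,                        # observations x genes
#       full_formula_loc="~ 1 + condition",
#       reduced_formula_loc="~ 1",
#       sample_description=sample_df,      # must contain a "condition" column
#       noise_model="nb",
#   )
#   test.summary().head()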
def wald(
data: Union[anndata.AnnData, Raw, np.ndarray, scipy.sparse.csr_matrix, glm.typing.InputDataBase],
factor_loc_totest: Union[str, List[str]] = None,
coef_to_test: Union[str, List[str]] = None,
formula_loc: Union[None, str] = None,
formula_scale: Union[None, str] = "~1",
as_numeric: Union[List[str], Tuple[str], str] = (),
init_a: Union[np.ndarray, str] = "AUTO",
init_b: Union[np.ndarray, str] = "AUTO",
gene_names: Union[np.ndarray, list] = None,
sample_description: Union[None, pd.DataFrame] = None,
dmat_loc: Union[patsy.design_info.DesignMatrix] = None,
dmat_scale: Union[patsy.design_info.DesignMatrix] = None,
constraints_loc: Union[None, List[str], Tuple[str, str], dict, np.ndarray] = None,
constraints_scale: Union[None, List[str], Tuple[str, str], dict, np.ndarray] = None,
noise_model: str = "nb",
size_factors: Union[np.ndarray, pd.core.series.Series, str] = None,
batch_size: Union[None, int, Tuple[int, int]] = None,
backend: str = "numpy",
train_args: dict = {},
training_strategy: Union[str, List[Dict[str, object]], Callable] = "AUTO",
quick_scale: bool = False,
dtype="float64",
**kwargs
):
"""
Perform Wald test for differential expression for each gene.
:param data: Input data matrix (observations x features) or (cells x genes).
:param factor_loc_totest: str, list of strings
List of factors of formula to test with Wald test.
E.g. "condition" or ["batch", "condition"] if formula_loc would be "~ 1 + batch + condition"
:param coef_to_test:
If there are more than two groups specified by `factor_loc_totest`,
this parameter allows to specify the group which should be tested.
Alternatively, if factor_loc_totest is not given, this list sets
the exact coefficients which are to be tested.
:param formula_loc: formula
        model formula for location parameter model.
:param formula_scale: formula
model formula for scale parameter model.
:param as_numeric:
Which columns of sample_description to treat as numeric and
not as categorical. This yields columns in the design matrix
which do not correspond to one-hot encoded discrete factors.
This makes sense for number of genes, time, pseudotime or space
for example.
:param init_a: (Optional) Low-level initial values for a.
Can be:
- str:
* "auto": automatically choose best initialization
* "standard": initialize intercept with observed mean
* "closed_form": try to initialize with closed form
- np.ndarray: direct initialization of 'a'
:param init_b: (Optional) Low-level initial values for b
Can be:
- str:
* "auto": automatically choose best initialization
* "standard": initialize with zeros
* "closed_form": try to initialize with closed form
- np.ndarray: direct initialization of 'b'
:param gene_names: optional list/array of gene names which will be used if `data` does not implicitly store these
:param sample_description: optional pandas.DataFrame containing sample annotations
:param dmat_loc: Pre-built location model design matrix.
This over-rides formula_loc and sample description information given in
data or sample_description.
:param dmat_scale: Pre-built scale model design matrix.
This over-rides formula_scale and sample description information given in
data or sample_description.
:param constraints_loc: Constraints for location model. Can be one of the following:
- np.ndarray:
Array with constraints in rows and model parameters in columns.
            Each constraint contains non-zero entries for the set of parameters that
has to sum to zero. This constraint is enforced by binding one parameter
to the negative sum of the other parameters, effectively representing that
parameter as a function of the other parameters. This dependent
parameter is indicated by a -1 in this array, the independent parameters
of that constraint (which may be dependent at an earlier constraint)
are indicated by a 1. You should only use this option
together with prebuilt design matrix for the location model, dmat_loc,
for example via de.utils.setup_constrained().
- dict:
Every element of the dictionary corresponds to one set of equality constraints.
            Each set has to be an entry of the form {..., x: y, ...}
where x is the factor to be constrained and y is a factor by which levels of x are grouped
and then constrained. Set y="1" to constrain all levels of x to sum to one,
a single equality constraint.
E.g.: {"batch": "condition"} Batch levels within each condition are constrained to sum to
            zero. This is applicable if repeats of an experiment within each condition
are independent so that the set-up ~1+condition+batch is perfectly confounded.
Can only group by non-constrained effects right now, use constraint_matrix_from_string
for other cases.
- list of strings or tuple of strings:
String encoded equality constraints.
E.g. ["batch1 + batch2 + batch3 = 0"]
- None:
No constraints are used, this is equivalent to using an identity matrix as a
constraint matrix.
:param constraints_scale: Constraints for scale model. Can be one of the following:
- np.ndarray:
Array with constraints in rows and model parameters in columns.
            Each constraint contains non-zero entries for the set of parameters that
has to sum to zero. This constraint is enforced by binding one parameter
to the negative sum of the other parameters, effectively representing that
parameter as a function of the other parameters. This dependent
parameter is indicated by a -1 in this array, the independent parameters
of that constraint (which may be dependent at an earlier constraint)
are indicated by a 1. You should only use this option
together with prebuilt design matrix for the scale model, dmat_scale,
for example via de.utils.setup_constrained().
- dict:
Every element of the dictionary corresponds to one set of equality constraints.
            Each set has to be an entry of the form {..., x: y, ...}
where x is the factor to be constrained and y is a factor by which levels of x are grouped
and then constrained. Set y="1" to constrain all levels of x to sum to one,
a single equality constraint.
E.g.: {"batch": "condition"} Batch levels within each condition are constrained to sum to
            zero. This is applicable if repeats of an experiment within each condition
are independent so that the set-up ~1+condition+batch is perfectly confounded.
Can only group by non-constrained effects right now, use constraint_matrix_from_string
for other cases.
- list of strings or tuple of strings:
String encoded equality constraints.
E.g. ["batch1 + batch2 + batch3 = 0"]
- None:
No constraints are used, this is equivalent to using an identity matrix as a
constraint matrix.
:param size_factors: 1D array of transformed library size factors for each cell in the
same order as in data or string-type column identifier of size-factor containing
column in sample description.
:param noise_model: str, noise model to use in model-based unit_test. Possible options:
- 'nb': default
:param batch_size: Argument controlling the memory load of the fitting procedure. For backends that allow
chunking of operations, this parameter controls the size of the batch / chunk.
- If backend is "tf1" or "tf2": number of observations per batch
- If backend is "numpy": Tuple of (number of observations per chunk, number of genes per chunk)
:param backend: Which linear algebra library to chose. This impact the available noise models and optimizers /
training strategies. Available are:
- "numpy" numpy
- "tf1" tensorflow1.* >= 1.13
- "tf2" tensorflow2.*
:param training_strategy: {str, function, list} training strategy to use. Can be:
- str: will use Estimator.TrainingStrategy[training_strategy] to train
- function: Can be used to implement custom training function will be called as
`training_strategy(estimator)`.
- list of keyword dicts containing method arguments: Will call Estimator.train() once with each dict of
method arguments.
:param quick_scale: Depending on the optimizer, `scale` will be fitted faster and maybe less accurate.
Useful in scenarios where fitting the exact `scale` is not absolutely necessary.
:param dtype: Allows specifying the precision which should be used to fit data.
Should be "float32" for single precision or "float64" for double precision.
:param kwargs: [Debugging] Additional arguments will be passed to the _fit method.
"""
if len(kwargs) != 0:
logging.getLogger("diffxpy").debug("additional kwargs: %s", str(kwargs))
if (dmat_loc is None and formula_loc is None) or \
(dmat_loc is not None and formula_loc is not None):
raise ValueError("Supply either dmat_loc or formula_loc.")
if (dmat_scale is None and formula_scale is None) or \
(dmat_scale is not None and formula_scale != "~1"):
raise ValueError("Supply either dmat_scale or formula_scale.")
if dmat_loc is not None and factor_loc_totest is not None:
raise ValueError("Supply coef_to_test and not factor_loc_totest if dmat_loc is supplied.")
# Check that factor_loc_totest and coef_to_test are lists and not single strings:
if isinstance(factor_loc_totest, str):
factor_loc_totest = [factor_loc_totest]
if isinstance(coef_to_test, str):
coef_to_test = [coef_to_test]
if isinstance(as_numeric, str):
as_numeric = [as_numeric]
# Parse input data formats:
gene_names = parse_gene_names(data, gene_names)
if dmat_loc is None and dmat_scale is None:
sample_description = parse_sample_description(data, sample_description)
size_factors = parse_size_factors(
size_factors=size_factors,
data=data,
sample_description=sample_description
)
# Build design matrices and constraints.
design_loc, design_loc_names, constraints_loc, term_names_loc = constraint_system_from_star(
dmat=dmat_loc,
sample_description=sample_description,
formula=formula_loc,
as_numeric=as_numeric,
constraints=constraints_loc,
return_type="patsy"
)
design_scale, design_scale_names, constraints_scale, term_names_scale = constraint_system_from_star(
dmat=dmat_scale,
sample_description=sample_description,
formula=formula_scale,
as_numeric=as_numeric,
constraints=constraints_scale,
return_type="patsy"
)
# Define indices of coefficients to test:
constraints_loc_temp = constraints_loc if constraints_loc is not None else np.eye(design_loc.shape[-1])
# Check that design_loc is patsy, otherwise use term_names for slicing.
if factor_loc_totest is not None:
if not isinstance(design_loc, patsy.design_info.DesignMatrix):
col_indices = np.where([
x in factor_loc_totest
for x in term_names_loc
])[0]
else:
# Select coefficients to test via formula model:
col_indices = np.concatenate([
np.arange(design_loc.shape[-1])[design_loc.design_info.slice(x)]
for x in factor_loc_totest
])
assert len(col_indices) > 0, "Could not find any matching columns!"
if coef_to_test is not None:
if len(factor_loc_totest) > 1:
raise ValueError("do not set coef_to_test if more than one factor_loc_totest is given")
samples = sample_description[factor_loc_totest].astype(type(coef_to_test)) == coef_to_test
            one_cols = np.where(design_loc[samples][:, col_indices][0] == 1)
"""
Name: FissionsAdd
brief: Adding fission particles to phase vectors for MCDC-TNT
Author: <NAME> (OR State Univ - <EMAIL>) CEMeNT
Date: Nov 18th 2021
"""
import numpy as np
def FissionsAdd(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, p_alive,
fis_count, nu_new_neutrons, fission_event_index, num_part, particle_speed, rands):
"""
    Add fission-produced neutrons to the particle phase-space vectors after a sampling event.
Parameters
----------
p_pos_x : vector double
PSV: x position of phase space particles (index is particle value).
p_pos_y : vector double
PSV: y position of phase space particles (index is particle value).
p_pos_z : vector double
PSV: z position of phase space particles (index is particle value).
p_mesh_cell : vector int
PSV: mesh cell location of a given particle.
p_dir_y : vector double
PSV: y direction unit value of phase space particles (index is particle value).
p_dir_z : vector double
PSV: z direction unit value of phase space particles (index is particle value).
p_dir_x : vector double
PSV: x direction unit value of phase space particles (index is particle value).
p_speed : vector double
PSV: speed (energy) or a particle (index is particle).
p_time : vector double
PSV: particle clock.
p_alive : vector bool
PSV: is it alive?
fis_count : int
how many fissions where recorded in smaple event.
nu_new_neutrons : int
how many neutrons produced per fission.
fission_event_index : vector int
indicies of particles that underwent fission after sample event.
num_part : int
        number of particles currently under transport (indexed from 1).
particle_speed : double
speed of fissioned particles.
rands : vector double
produced from an rng, needs to be fis_count*nu*2.
Returns
-------
Phase space variables with new fissions added.
"""
k=0 #index for fission temp vectors
for i in range(fis_count):
for j in range(nu_new_neutrons):
# Position
p_pos_x[k+num_part] = p_pos_x[fission_event_index[i]]
p_mesh_cell[k+num_part] = p_mesh_cell[fission_event_index[i]]
p_pos_y[k+num_part] = p_pos_y[fission_event_index[i]]
p_pos_z[k+num_part] = p_pos_z[fission_event_index[i]]
# print("fission particle produced")
# print("from particle {0} and indexed as particle {1}".format(fission_event_index[i], k+num_part))
# print("produced at: {0}".format(p_pos_x[k+num_part]))
# Direction
# Sample polar and azimuthal angles uniformly
mu = 2.0*rands[4*i+2*j] - 1.0
            azi = 2.0*np.pi*rands[4*i+2*j+1]  # full azimuth in [0, 2*pi) for the cos/sin below
# Convert to Cartesian coordinate
c = (1.0 - mu**2)**0.5
p_dir_y[k+num_part] = np.cos(azi)*c
p_dir_z[k+num_part] = np.sin(azi)*c
p_dir_x[k+num_part] = mu
# Speed
p_speed[k+num_part] = particle_speed
# Time
p_time[k+num_part] = p_time[fission_event_index[i]]
# Flags
p_alive[k+num_part] = True
k+=1
return(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, p_alive, k)
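# Added sanity-check sketch (not part of the original kernel): the (mu, azi)
# sampling used in FissionsAdd maps two uniform random numbers to an isotropic
# unit direction vector.
def _demo_isotropic_direction(rand_pair=(0.3, 0.7)):
    mu = 2.0*rand_pair[0] - 1.0            # polar cosine, uniform in [-1, 1]
    azi = 2.0*np.pi*rand_pair[1]           # azimuthal angle, uniform in [0, 2*pi)
    c = (1.0 - mu**2)**0.5
    d_y, d_z, d_x = np.cos(azi)*c, np.sin(azi)*c, mu
    assert abs(d_x**2 + d_y**2 + d_z**2 - 1.0) < 1e-12
    return d_x, d_y, d_z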
def test_FissionsAdd():
L = 1
dx = .25
N_m = 4
num_part = 3
p_pos_x = np.array([.55, 3, 5])
p_pos_y = np.array([10, 3, 5])
p_pos_z = np.array([15, 3, 5])
p_mesh_cell = np.array([2, 87, -1])
    p_dir_x = np.ones(num_part)